From 57852824e2a8bb72372df5691c816264ed458ea3 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 25 Oct 2023 21:46:13 +0000
Subject: [PATCH] Bump google.golang.org/grpc from 1.31.0 to 1.56.3 in
 /prometheus-to-sd

Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.31.0 to 1.56.3.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.31.0...v1.56.3)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: indirect
...

Signed-off-by: dependabot[bot]
---
 prometheus-to-sd/go.mod | 35 +-
 prometheus-to-sd/go.sum | 321 +-
 .../cloud.google.com/go/{ => compute}/LICENSE | 0
 .../go/compute/internal/version.go | 18 +
 .../go/compute/metadata/CHANGES.md | 19 +
 .../go/compute/metadata/LICENSE | 202 +
 .../go/compute/metadata/README.md | 27 +
 .../go/compute/metadata/metadata.go | 52 +-
 .../go/compute/metadata/retry.go | 114 +
 .../go/compute/metadata/retry_linux.go | 26 +
 .../go/compute/metadata/tidyfix.go | 23 +
 .../github.com/cespare/xxhash/v2/.travis.yml | 8 -
 .../github.com/cespare/xxhash/v2/README.md | 37 +-
 .../github.com/cespare/xxhash/v2/testall.sh | 10 +
 .../github.com/cespare/xxhash/v2/xxhash.go | 48 +-
 .../cespare/xxhash/v2/xxhash_amd64.s | 336 +-
 .../cespare/xxhash/v2/xxhash_arm64.s | 183 +
 .../v2/{xxhash_amd64.go => xxhash_asm.go} | 2 +
 .../cespare/xxhash/v2/xxhash_other.go | 22 +-
 .../cespare/xxhash/v2/xxhash_safe.go | 1 +
 .../cespare/xxhash/v2/xxhash_unsafe.go | 54 +-
 .../vendor/github.com/golang/glog/README | 44 -
 .../vendor/github.com/golang/glog/README.md | 36 +
 .../vendor/github.com/golang/glog/glog.go | 1219 +-
 .../github.com/golang/glog/glog_file.go | 305 +-
 .../github.com/golang/glog/glog_flags.go | 395 +
 .../golang/glog/internal/logsink/logsink.go | 387 +
 .../glog/internal/logsink/logsink_fatal.go | 35 +
 .../glog/internal/stackdump/stackdump.go | 127 +
 .../golang/protobuf/jsonpb/decode.go | 530 +
 .../golang/protobuf/jsonpb/encode.go | 559 +
 .../github.com/golang/protobuf/jsonpb/json.go | 69 +
 .../vendor/github.com/google/uuid/.travis.yml | 9 +
 .../github.com/google/uuid/CONTRIBUTING.md | 10 +
 .../github.com/google/uuid/CONTRIBUTORS | 9 +
 .../vendor/github.com/google/uuid/LICENSE | 27 +
 .../vendor/github.com/google/uuid/README.md | 19 +
 .../vendor/github.com/google/uuid/dce.go | 80 +
 .../vendor/github.com/google/uuid/doc.go | 12 +
 .../vendor/github.com/google/uuid/hash.go | 53 +
 .../vendor/github.com/google/uuid/marshal.go | 38 +
 .../vendor/github.com/google/uuid/node.go | 90 +
 .../vendor/github.com/google/uuid/node_js.go | 12 +
 .../vendor/github.com/google/uuid/node_net.go | 33 +
 .../vendor/github.com/google/uuid/null.go | 118 +
 .../vendor/github.com/google/uuid/sql.go | 59 +
 .../vendor/github.com/google/uuid/time.go | 123 +
 .../vendor/github.com/google/uuid/util.go | 43 +
 .../vendor/github.com/google/uuid/uuid.go | 294 +
 .../vendor/github.com/google/uuid/version1.go | 44 +
 .../vendor/github.com/google/uuid/version4.go | 76 +
 .../enterprise-certificate-proxy/LICENSE | 202 +
 .../client/client.go | 185 +
 .../client/util/util.go | 91 +
 .../gax-go/v2/.release-please-manifest.json | 3 +
 .../googleapis/gax-go/v2/CHANGES.md | 54 +
 .../googleapis/gax-go/v2/apierror/apierror.go | 347 +
 .../v2/apierror/internal/proto/README.md | 30 +
 .../internal/proto/custom_error.pb.go | 256 +
 .../internal/proto/custom_error.proto | 50 +
 .../v2/apierror/internal/proto/error.pb.go | 280 +
 .../v2/apierror/internal/proto/error.proto | 46 +
 .../googleapis/gax-go/v2/call_option.go | 101 +-
 .../googleapis/gax-go/v2/content_type.go | 112 +
 .../github.com/googleapis/gax-go/v2/gax.go | 4 +-
 .../googleapis/gax-go/v2/internal/version.go | 33 +
 .../github.com/googleapis/gax-go/v2/invoke.go | 15 +-
 .../googleapis/gax-go/v2/proto_json_stream.go | 126 +
 .../gax-go/v2/release-please-config.json | 10 +
 .../vendor/go.opencensus.io/.travis.yml | 17 -
 .../vendor/go.opencensus.io/Makefile | 8 +-
 .../vendor/go.opencensus.io/opencensus.go | 2 +-
 .../go.opencensus.io/plugin/ochttp/server.go | 8 +-
 .../vendor/go.opencensus.io/stats/doc.go | 7 +-
 .../go.opencensus.io/stats/internal/record.go | 6 +
 .../vendor/go.opencensus.io/stats/record.go | 21 +-
 .../stats/view/aggregation.go | 24 +-
 .../stats/view/aggregation_data.go | 55 +-
 .../go.opencensus.io/stats/view/collector.go | 11 +-
 .../vendor/go.opencensus.io/stats/view/doc.go | 2 +-
 .../stats/view/view_to_metric.go | 13 +-
 .../go.opencensus.io/stats/view/worker.go | 63 +-
 .../vendor/go.opencensus.io/tag/profile_19.go | 1 +
 .../go.opencensus.io/tag/profile_not19.go | 1 +
 .../go.opencensus.io/trace/basetypes.go | 10 +
 .../vendor/go.opencensus.io/trace/doc.go | 13 +-
 .../vendor/go.opencensus.io/trace/lrumap.go | 2 +-
 .../go.opencensus.io/trace/spanstore.go | 14 +-
 .../vendor/go.opencensus.io/trace/trace.go | 179 +-
 .../go.opencensus.io/trace/trace_api.go | 265 +
 .../go.opencensus.io/trace/trace_go11.go | 1 +
 .../go.opencensus.io/trace/trace_nongo11.go | 1 +
 .../x/net/context/ctxhttp/ctxhttp.go | 71 -
 .../vendor/golang.org/x/net/http2/pipe.go | 6 +-
 .../vendor/golang.org/x/net/http2/server.go | 7 +-
 .../golang.org/x/net/http2/transport.go | 11 +-
 .../vendor/golang.org/x/oauth2/AUTHORS | 3 -
 .../vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 -
 .../vendor/golang.org/x/oauth2/README.md | 12 +-
 .../x/oauth2/authhandler/authhandler.go | 44 +-
 .../golang.org/x/oauth2/google/default.go | 66 +-
 .../vendor/golang.org/x/oauth2/google/doc.go | 78 +-
 .../golang.org/x/oauth2/google/error.go | 64 +
 .../golang.org/x/oauth2/google/google.go | 34 +-
 .../google/internal/externalaccount/aws.go | 165 +-
 .../externalaccount/basecredentials.go | 71 +-
 .../externalaccount/executablecredsource.go | 309 +
 .../internal/externalaccount/impersonate.go | 9 +-
 .../vendor/golang.org/x/oauth2/google/jwt.go | 3 +-
 .../golang.org/x/oauth2/internal/token.go | 4 +-
 .../vendor/golang.org/x/oauth2/jws/jws.go | 2 +-
 .../vendor/golang.org/x/oauth2/oauth2.go | 33 +-
 .../vendor/golang.org/x/oauth2/token.go | 14 +-
 .../golang.org/x/sys/unix/ioctl_signed.go | 70 +
 .../sys/unix/{ioctl.go => ioctl_unsigned.go} | 4 +-
 .../vendor/golang.org/x/sys/unix/ioctl_zos.go | 12 +-
 .../vendor/golang.org/x/sys/unix/mkerrors.sh | 2 +
 .../golang.org/x/sys/unix/syscall_aix.go | 4 +-
 .../golang.org/x/sys/unix/syscall_aix_ppc.go | 1 -
 .../x/sys/unix/syscall_aix_ppc64.go | 1 -
 .../golang.org/x/sys/unix/syscall_darwin.go | 3 +-
 .../x/sys/unix/syscall_dragonfly.go | 1 -
 .../golang.org/x/sys/unix/syscall_freebsd.go | 1 -
 .../golang.org/x/sys/unix/syscall_linux.go | 10 +-
 .../x/sys/unix/syscall_linux_386.go | 27 -
 .../x/sys/unix/syscall_linux_amd64.go | 1 -
 .../x/sys/unix/syscall_linux_arm.go | 27 -
 .../x/sys/unix/syscall_linux_arm64.go | 10 -
 .../x/sys/unix/syscall_linux_loong64.go | 5 -
 .../x/sys/unix/syscall_linux_mips64x.go | 1 -
 .../x/sys/unix/syscall_linux_mipsx.go | 27 -
 .../x/sys/unix/syscall_linux_ppc.go | 27 -
 .../x/sys/unix/syscall_linux_ppc64x.go | 1 -
 .../x/sys/unix/syscall_linux_riscv64.go | 1 -
 .../x/sys/unix/syscall_linux_s390x.go | 1 -
 .../x/sys/unix/syscall_linux_sparc64.go | 1 -
 .../golang.org/x/sys/unix/syscall_netbsd.go | 2 -
 .../golang.org/x/sys/unix/syscall_openbsd.go | 1 -
 .../golang.org/x/sys/unix/syscall_solaris.go | 21 +-
 .../golang.org/x/sys/unix/syscall_unix.go | 7 +
 .../x/sys/unix/syscall_zos_s390x.go | 4 +-
 .../x/sys/unix/zerrors_darwin_amd64.go | 19 +
 .../x/sys/unix/zerrors_darwin_arm64.go | 19 +
 .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 15 +-
 .../x/sys/unix/zsyscall_aix_ppc64.go | 18 +-
 .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 10 -
 .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 10 +-
 .../x/sys/unix/zsyscall_darwin_amd64.go | 39 +-
 .../x/sys/unix/zsyscall_darwin_amd64.s | 11 +-
 .../x/sys/unix/zsyscall_darwin_arm64.go | 39 +-
 .../x/sys/unix/zsyscall_darwin_arm64.s | 11 +-
 .../x/sys/unix/zsyscall_dragonfly_amd64.go | 10 -
 .../x/sys/unix/zsyscall_freebsd_386.go | 10 -
 .../x/sys/unix/zsyscall_freebsd_amd64.go | 10 -
 .../x/sys/unix/zsyscall_freebsd_arm.go | 10 -
 .../x/sys/unix/zsyscall_freebsd_arm64.go | 10 -
 .../x/sys/unix/zsyscall_freebsd_riscv64.go | 10 -
 .../golang.org/x/sys/unix/zsyscall_linux.go | 10 -
 .../x/sys/unix/zsyscall_linux_386.go | 10 -
 .../x/sys/unix/zsyscall_linux_amd64.go | 10 -
 .../x/sys/unix/zsyscall_linux_arm.go | 10 -
 .../x/sys/unix/zsyscall_linux_arm64.go | 10 -
 .../x/sys/unix/zsyscall_linux_mips.go | 10 -
 .../x/sys/unix/zsyscall_linux_mips64.go | 10 -
 .../x/sys/unix/zsyscall_linux_mips64le.go | 10 -
 .../x/sys/unix/zsyscall_linux_mipsle.go | 10 -
 .../x/sys/unix/zsyscall_linux_ppc.go | 10 -
 .../x/sys/unix/zsyscall_linux_ppc64.go | 10 -
 .../x/sys/unix/zsyscall_linux_ppc64le.go | 10 -
 .../x/sys/unix/zsyscall_linux_riscv64.go | 10 -
 .../x/sys/unix/zsyscall_linux_s390x.go | 10 -
 .../x/sys/unix/zsyscall_linux_sparc64.go | 10 -
 .../x/sys/unix/zsyscall_netbsd_386.go | 10 -
 .../x/sys/unix/zsyscall_netbsd_amd64.go | 10 -
 .../x/sys/unix/zsyscall_netbsd_arm.go | 10 -
 .../x/sys/unix/zsyscall_netbsd_arm64.go | 10 -
 .../x/sys/unix/zsyscall_openbsd_386.go | 14 -
 .../x/sys/unix/zsyscall_openbsd_386.s | 5 -
 .../x/sys/unix/zsyscall_openbsd_amd64.go | 14 -
 .../x/sys/unix/zsyscall_openbsd_amd64.s | 5 -
 .../x/sys/unix/zsyscall_openbsd_arm.go | 14 -
 .../x/sys/unix/zsyscall_openbsd_arm.s | 5 -
 .../x/sys/unix/zsyscall_openbsd_arm64.go | 14 -
 .../x/sys/unix/zsyscall_openbsd_arm64.s | 5 -
 .../x/sys/unix/zsyscall_openbsd_mips64.go | 14 -
 .../x/sys/unix/zsyscall_openbsd_mips64.s | 5 -
 .../x/sys/unix/zsyscall_openbsd_ppc64.go | 14 -
 .../x/sys/unix/zsyscall_openbsd_ppc64.s | 6 -
 .../x/sys/unix/zsyscall_openbsd_riscv64.go | 14 -
 .../x/sys/unix/zsyscall_openbsd_riscv64.s | 5 -
 .../x/sys/unix/zsyscall_solaris_amd64.go | 17 +-
 .../x/sys/unix/zsyscall_zos_s390x.go | 4 +-
 .../x/sys/unix/ztypes_darwin_amd64.go | 11 +
 .../x/sys/unix/ztypes_darwin_arm64.go | 11 +
 .../golang.org/x/sys/windows/types_windows.go | 4 +-
 .../api/googleapi/googleapi.go | 73 +-
 .../api/googleapi/transport/apikey.go | 2 +-
 .../api/internal/cert/default_cert.go | 58 +
 .../api/internal/cert/enterprise_cert.go | 54 +
 .../api/internal/cert/secureconnect_cert.go | 123 +
 .../google.golang.org/api/internal/creds.go | 188 +-
 .../google.golang.org/api/internal/dca.go | 144 +
 .../api/internal/gensupport/error.go | 24 +
 .../api/internal/gensupport/json.go | 33 +-
 .../api/internal/gensupport/media.go | 140 +-
 .../api/internal/gensupport/params.go | 10 +-
 .../api/internal/gensupport/resumable.go | 128 +-
 .../api/internal/gensupport/retry.go | 121 +
 .../internal/gensupport/retryable_linux.go | 1 +
 .../api/internal/gensupport/send.go | 126 +-
 .../api/internal/impersonate/impersonate.go | 128 +
 .../api/internal/service-account.json | 12 -
 .../api/internal/settings.go | 76 +-
 .../google.golang.org/api/internal/version.go | 8 +
 .../api/monitoring/v3/monitoring-api.json | 1741 ++-
 .../api/monitoring/v3/monitoring-gen.go | 10283 ++++++++++++----
 .../api/option/credentials_go19.go | 23 -
 .../api/option/credentials_notgo19.go | 22 -
 .../option/internaloption/internaloption.go | 93 +-
 .../google.golang.org/api/option/option.go | 80 +-
 .../api/transport/cert/default_cert.go | 110 -
 .../transport/http/default_transport_go113.go | 20 -
 .../http/default_transport_not_go113.go | 15 -
 .../api/transport/http/dial.go | 129 +-
 .../api/transport/http/dial_appengine.go | 1 +
 .../http/internal/propagation/http.go | 1 +
 .../genproto/googleapis/rpc/code/code.pb.go | 336 +
 .../rpc/errdetails/error_details.pb.go | 1314 ++
 .../googleapis/rpc/status/status.pb.go | 17 +-
 .../vendor/google.golang.org/grpc/.travis.yml | 42 -
 .../google.golang.org/grpc/CONTRIBUTING.md | 33 +-
 .../google.golang.org/grpc/MAINTAINERS.md | 5 +-
 .../vendor/google.golang.org/grpc/Makefile | 33 +-
 .../vendor/google.golang.org/grpc/NOTICE.txt | 13 +
 .../vendor/google.golang.org/grpc/README.md | 6 +-
 .../vendor/google.golang.org/grpc/SECURITY.md | 3 +
 .../grpc/attributes/attributes.go | 112 +-
 .../vendor/google.golang.org/grpc/backoff.go | 5 +-
 .../grpc/balancer/balancer.go | 156 +-
 .../grpc/balancer/base/balancer.go | 49 +-
 .../grpc/balancer/conn_state_evaluator.go | 74 +
 .../grpc/balancer/grpclb/state/state.go | 2 +-
 .../grpc/balancer/roundrobin/roundrobin.go | 20 +-
 .../grpc/balancer_conn_wrappers.go | 475 +-
 .../grpc_binarylog_v1/binarylog.pb.go | 1261 +-
 .../vendor/google.golang.org/grpc/call.go | 5 +
 .../grpc/channelz/channelz.go | 36 +
 .../google.golang.org/grpc/clientconn.go | 1442 ++-
 .../grpc/codes/code_string.go | 51 +-
 .../grpc/connectivity/connectivity.go | 47 +-
 .../grpc/credentials/credentials.go | 88 +-
 .../grpc/credentials/go12.go | 30 -
 .../grpc/credentials/insecure/insecure.go | 98 +
 .../google.golang.org/grpc/credentials/tls.go | 51 +-
 .../google.golang.org/grpc/dialoptions.go | 251 +-
 .../grpc/encoding/encoding.go | 19 +-
 .../grpc/encoding/proto/proto.go | 70 +-
 .../grpc/grpclog/component.go | 2 +-
 .../grpc/grpclog/loggerv2.go | 106 +-
 .../vendor/google.golang.org/grpc/idle.go | 287 +
 .../google.golang.org/grpc/install_gae.sh | 6 -
 .../google.golang.org/grpc/interceptor.go | 45 +-
 .../balancer/gracefulswitch/gracefulswitch.go | 384 +
 .../grpc/internal/binarylog/binarylog.go | 118 +-
 .../grpc/internal/binarylog/env_config.go | 26 +-
 .../grpc/internal/binarylog/method_logger.go | 163 +-
 .../grpc/internal/binarylog/sink.go | 115 +-
 .../grpc/internal/buffer/unbounded.go | 26 +-
 .../grpc/internal/channelz/funcs.go | 242 +-
 .../grpc/internal/channelz/id.go | 75 +
 .../grpc/internal/channelz/logging.go | 91 +-
 .../grpc/internal/channelz/types.go | 47 +-
 .../grpc/internal/channelz/types_linux.go | 2 -
 .../grpc/internal/channelz/types_nonlinux.go | 5 +-
 .../grpc/internal/channelz/util_linux.go | 2 -
 .../grpc/internal/channelz/util_nonlinux.go | 3 +-
 .../grpc/internal/credentials/credentials.go | 49 +
 .../credentials/{go110.go => spiffe.go} | 28 +-
 .../credentials}/syscallconn.go | 5 +-
 .../grpc/internal/credentials/util.go | 52 +
 .../grpc/internal/envconfig/envconfig.go | 46 +-
 .../grpc/internal/envconfig/observability.go | 42 +
 .../grpc/internal/envconfig/xds.go | 95 +
 .../grpc/internal/grpclog/grpclog.go | 13 +-
 .../grpc/internal/grpclog/prefixLogger.go | 12 +
 .../grpc/internal/grpcrand/grpcrand.go | 50 +-
 .../internal/grpcsync/callback_serializer.go | 119 +
 .../gobefore110.go => grpcsync/oncefunc.go} | 19 +-
 .../grpc/internal/grpcutil/compressor.go | 47 +
 .../grpcutil/grpcutil.go} | 16 +-
 .../grpc/internal/grpcutil/method.go | 6 +-
 .../dns/go113.go => grpcutil/regex.go} | 22 +-
 .../grpc/internal/grpcutil/target.go | 55 -
 .../grpc/internal/internal.go | 138 +-
 .../grpc/internal/metadata/metadata.go | 132 +
 .../grpc/internal/pretty/pretty.go | 82 +
 .../grpc/internal/resolver/config_selector.go | 167 +
 .../internal/resolver/dns/dns_resolver.go | 58 +-
 .../resolver/passthrough/passthrough.go | 11 +-
 .../grpc/internal/resolver/unix/unix.go | 74 +
 .../grpc/internal/serviceconfig/duration.go | 130 +
 .../internal/serviceconfig/serviceconfig.go | 102 +-
 .../grpc/internal/status/status.go | 24 +-
 .../grpc/internal/syscall/syscall_linux.go | 24 +-
 .../grpc/internal/syscall/syscall_nonlinux.go | 23 +-
 .../grpc/internal/transport/controlbuf.go | 197 +-
 .../grpc/internal/transport/defaults.go | 6 +
 .../grpc/internal/transport/flowcontrol.go | 4 +-
 .../grpc/internal/transport/handler_server.go | 81 +-
 .../grpc/internal/transport/http2_client.go | 753 +-
 .../grpc/internal/transport/http2_server.go | 602 +-
 .../grpc/internal/transport/http_util.go | 317 +-
 .../grpc/internal/transport/logging.go | 40 +
 .../transport/networktype/networktype.go | 46 +
 .../grpc/{ => internal/transport}/proxy.go | 54 +-
 .../grpc/internal/transport/transport.go | 80 +-
 .../grpc/internal/xds_handshake_cluster.go | 40 +
 .../grpc/metadata/metadata.go | 174 +-
 .../google.golang.org/grpc/picker_wrapper.go | 77 +-
 .../google.golang.org/grpc/pickfirst.go | 181 +-
 .../google.golang.org/grpc/preloader.go | 5 +-
 .../google.golang.org/grpc/regenerate.sh | 80 +-
 .../google.golang.org/grpc/resolver/map.go | 138 +
 .../grpc/resolver/resolver.go | 116 +-
 .../grpc/resolver_conn_wrapper.go | 273 +-
 .../vendor/google.golang.org/grpc/rpc_util.go | 267 +-
 .../vendor/google.golang.org/grpc/server.go | 898 +-
 .../google.golang.org/grpc/service_config.go | 165 +-
 .../grpc/serviceconfig/serviceconfig.go | 5 +-
 .../google.golang.org/grpc/stats/stats.go | 33 +-
 .../google.golang.org/grpc/status/status.go | 81 +-
 .../vendor/google.golang.org/grpc/stream.go | 695 +-
 .../vendor/google.golang.org/grpc/tap/tap.go | 23 +-
 .../vendor/google.golang.org/grpc/version.go | 2 +-
 .../vendor/google.golang.org/grpc/vet.sh | 118 +-
 .../protobuf/encoding/protojson/decode.go | 665 +
 .../protobuf/encoding/protojson/doc.go | 11 +
 .../protobuf/encoding/protojson/encode.go | 343 +
 .../encoding/protojson/well_known_types.go | 895 ++
 .../protobuf/encoding/protowire/wire.go | 8 +-
 .../protobuf/internal/encoding/json/decode.go | 340 +
 .../internal/encoding/json/decode_number.go | 254 +
 .../internal/encoding/json/decode_string.go | 91 +
 .../internal/encoding/json/decode_token.go | 192 +
 .../protobuf/internal/encoding/json/encode.go | 276 +
 .../protobuf/internal/encoding/text/decode.go | 5 +-
 .../internal/encoding/text/decode_number.go | 43 +-
 .../protobuf/internal/genid/descriptor_gen.go | 90 +-
 .../protobuf/internal/impl/convert.go | 1 -
 .../protobuf/internal/strs/strings_unsafe.go | 2 +-
 .../protobuf/internal/version/version.go | 4 +-
 .../google.golang.org/protobuf/proto/doc.go | 9 +-
 .../google.golang.org/protobuf/proto/equal.go | 172 +-
 .../reflect/protoreflect/source_gen.go | 14 +
 .../protobuf/reflect/protoreflect/value.go | 2 +-
 .../reflect/protoreflect/value_equal.go | 168 +
 .../reflect/protoreflect/value_union.go | 4 +-
 .../reflect/protoregistry/registry.go | 2 +-
 .../types/descriptorpb/descriptor.pb.go | 1547 ++-
 .../protobuf/types/known/anypb/any.pb.go | 135 +-
 .../types/known/durationpb/duration.pb.go | 63 +-
 .../types/known/timestamppb/timestamp.pb.go | 61 +-
 prometheus-to-sd/vendor/modules.txt | 77 +-
 363 files changed, 34346 insertions(+), 11142 deletions(-)
 rename prometheus-to-sd/vendor/cloud.google.com/go/{ => compute}/LICENSE (100%)
 create mode 100644 prometheus-to-sd/vendor/cloud.google.com/go/compute/internal/version.go
 create mode 100644 prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
 create mode 100644 prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/LICENSE
 create mode 100644 prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/README.md
 create mode 100644 prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/retry.go
 create mode 100644 prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
 create mode 100644 prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/tidyfix.go
 delete mode 100644 prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/.travis.yml
 create mode 100644 prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/testall.sh
 create mode 100644 prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
 rename prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/{xxhash_amd64.go => xxhash_asm.go} (73%)
 delete mode 100644 prometheus-to-sd/vendor/github.com/golang/glog/README
 create mode 100644 prometheus-to-sd/vendor/github.com/golang/glog/README.md
 create mode 100644 prometheus-to-sd/vendor/github.com/golang/glog/glog_flags.go
 create mode 100644 prometheus-to-sd/vendor/github.com/golang/glog/internal/logsink/logsink.go
 create mode 100644 prometheus-to-sd/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go
 create mode 100644 prometheus-to-sd/vendor/github.com/golang/glog/internal/stackdump/stackdump.go
 create mode 100644 prometheus-to-sd/vendor/github.com/golang/protobuf/jsonpb/decode.go
 create mode 100644 prometheus-to-sd/vendor/github.com/golang/protobuf/jsonpb/encode.go
 create mode 100644 prometheus-to-sd/vendor/github.com/golang/protobuf/jsonpb/json.go
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/.travis.yml
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/CONTRIBUTING.md
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/CONTRIBUTORS
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/LICENSE
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/README.md
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/dce.go
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/doc.go
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/hash.go
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/marshal.go
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/node.go
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/node_js.go
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/node_net.go
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/null.go
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/sql.go
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/time.go
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/util.go
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/uuid.go
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/version1.go
 create mode 100644 prometheus-to-sd/vendor/github.com/google/uuid/version4.go
 create mode 100644 prometheus-to-sd/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE
 create mode 100644 prometheus-to-sd/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go
 create mode 100644 prometheus-to-sd/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go
 create mode 100644 prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
 create mode 100644 prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
 create mode 100644 prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go
 create mode 100644 prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md
 create mode 100644 prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go
 create mode 100644 prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto
 create mode 100644 prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go
 create mode 100644 prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto
 create mode 100644 prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/content_type.go
 create mode 100644 prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/internal/version.go
 create mode 100644 prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go
 create mode 100644 prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/release-please-config.json
 delete mode 100644 prometheus-to-sd/vendor/go.opencensus.io/.travis.yml
 create mode 100644 prometheus-to-sd/vendor/go.opencensus.io/trace/trace_api.go
 delete mode 100644 prometheus-to-sd/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
 delete mode 100644 prometheus-to-sd/vendor/golang.org/x/oauth2/AUTHORS
 delete mode 100644 prometheus-to-sd/vendor/golang.org/x/oauth2/CONTRIBUTORS
 create mode 100644 prometheus-to-sd/vendor/golang.org/x/oauth2/google/error.go
 create mode 100644 prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go
 create mode 100644 prometheus-to-sd/vendor/golang.org/x/sys/unix/ioctl_signed.go
 rename prometheus-to-sd/vendor/golang.org/x/sys/unix/{ioctl.go => ioctl_unsigned.go} (92%)
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/api/internal/cert/default_cert.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/api/internal/cert/enterprise_cert.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/api/internal/dca.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/error.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/retry.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/api/internal/impersonate/impersonate.go
 delete mode 100644 prometheus-to-sd/vendor/google.golang.org/api/internal/service-account.json
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/api/internal/version.go
 delete mode 100644 prometheus-to-sd/vendor/google.golang.org/api/option/credentials_go19.go
 delete mode 100644 prometheus-to-sd/vendor/google.golang.org/api/option/credentials_notgo19.go
 delete mode 100644 prometheus-to-sd/vendor/google.golang.org/api/transport/cert/default_cert.go
 delete mode 100644 prometheus-to-sd/vendor/google.golang.org/api/transport/http/default_transport_go113.go
 delete mode 100644 prometheus-to-sd/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
 delete mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/.travis.yml
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/NOTICE.txt
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/SECURITY.md
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/channelz/channelz.go
 delete mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/credentials/go12.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/idle.go
 delete mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/install_gae.sh
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/id.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/credentials.go
 rename prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/{go110.go => spiffe.go} (67%)
 rename prometheus-to-sd/vendor/google.golang.org/grpc/{credentials/internal => internal/credentials}/syscallconn.go (94%)
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/util.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/envconfig/observability.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/envconfig/xds.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
 rename prometheus-to-sd/vendor/google.golang.org/grpc/internal/{credentials/gobefore110.go => grpcsync/oncefunc.go} (67%)
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
 rename prometheus-to-sd/vendor/google.golang.org/grpc/{credentials/internal/syscallconn_appengine.go => internal/grpcutil/grpcutil.go} (72%)
 rename prometheus-to-sd/vendor/google.golang.org/grpc/internal/{resolver/dns/go113.go => grpcutil/regex.go} (63%)
 delete mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/target.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/metadata/metadata.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/pretty/pretty.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/logging.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
 rename prometheus-to-sd/vendor/google.golang.org/grpc/{ => internal/transport}/proxy.go (71%)
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/grpc/resolver/map.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
 create mode 100644 prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go

diff --git a/prometheus-to-sd/go.mod b/prometheus-to-sd/go.mod
index 68aa9e1db..434066e60 100644
--- a/prometheus-to-sd/go.mod
+++ b/prometheus-to-sd/go.mod
@@ -3,22 +3,23 @@ module github.com/GoogleCloudPlatform/k8s-stackdriver/prometheus-to-sd
 
 go 1.20
 
 require (
-	cloud.google.com/go v0.65.0
-	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
+	cloud.google.com/go/compute/metadata v0.2.3
+	github.com/golang/glog v1.1.0
 	github.com/prometheus/client_golang v1.11.1
 	github.com/prometheus/client_model v0.2.0
 	github.com/prometheus/common v0.26.0
-	github.com/stretchr/testify v1.8.0
-	golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b
-	google.golang.org/api v0.30.0
+	github.com/stretchr/testify v1.8.1
+	golang.org/x/oauth2 v0.7.0
+	google.golang.org/api v0.114.0
 	k8s.io/api v0.26.2
 	k8s.io/apimachinery v0.26.2
 	k8s.io/client-go v0.26.2
 )
 
 require (
+	cloud.google.com/go/compute v1.19.1 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.1.1 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/emicklei/go-restful/v3 v3.9.0 // indirect
 	github.com/go-logr/logr v1.2.3 // indirect
@@ -27,11 +28,13 @@ require (
 	github.com/go-openapi/swag v0.19.14 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/gnostic v0.5.7-v3refs // indirect
 	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/gofuzz v1.1.0 // indirect
-	github.com/googleapis/gax-go/v2 v2.0.5 // indirect
+	github.com/google/uuid v1.3.0 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
+	github.com/googleapis/gax-go/v2 v2.7.1 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/mailru/easyjson v0.7.6 // indirect
@@ -41,16 +44,16 @@ require (
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/prometheus/procfs v0.6.0 // indirect
-	go.opencensus.io v0.22.4 // indirect
-	golang.org/x/net v0.8.0 // indirect
-	golang.org/x/sys v0.6.0 // indirect
-	golang.org/x/term v0.6.0 // indirect
-	golang.org/x/text v0.8.0 // indirect
+	go.opencensus.io v0.24.0 // indirect
+	golang.org/x/net v0.9.0 // indirect
+	golang.org/x/sys v0.7.0 // indirect
+	golang.org/x/term v0.7.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
 	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154 // indirect
-	google.golang.org/grpc v1.31.0 // indirect
-	google.golang.org/protobuf v1.28.1 // indirect
+	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
+	google.golang.org/grpc v1.56.3 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/prometheus-to-sd/go.sum b/prometheus-to-sd/go.sum
index 8490982b0..a95fb60b0 100644
--- a/prometheus-to-sd/go.sum
+++ b/prometheus-to-sd/go.sum
@@ -1,39 +1,12 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
+cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY=
+cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -44,11 +17,9 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -62,9 +33,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -86,26 +54,16 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
+github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
 github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
 github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
 github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -115,19 +73,17 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
 github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
@@ -135,22 +91,13 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A=
+github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -159,8 +106,6 @@ github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/
 github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -219,7 +164,6 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -228,58 +172,28 @@ github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -289,217 +203,81 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools 
v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154 h1:bFFRpT+e8JJVY7lMMfvezL1ZIwqiwmPl2bsE2yx4HqM= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -512,15 +290,14 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -536,12 +313,7 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU= k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= @@ -554,9 +326,6 @@ k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+O k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= diff --git a/prometheus-to-sd/vendor/cloud.google.com/go/LICENSE b/prometheus-to-sd/vendor/cloud.google.com/go/compute/LICENSE similarity index 100% rename from prometheus-to-sd/vendor/cloud.google.com/go/LICENSE rename to prometheus-to-sd/vendor/cloud.google.com/go/compute/LICENSE diff --git a/prometheus-to-sd/vendor/cloud.google.com/go/compute/internal/version.go b/prometheus-to-sd/vendor/cloud.google.com/go/compute/internal/version.go new file mode 100644 index 000000000..a5b020992 --- /dev/null +++ b/prometheus-to-sd/vendor/cloud.google.com/go/compute/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. 
+const Version = "1.19.1" diff --git a/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/CHANGES.md new file mode 100644 index 000000000..06b957349 --- /dev/null +++ b/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -0,0 +1,19 @@ +# Changes + +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) + + +### Bug Fixes + +* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) + + +### Bug Fixes + +* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) + +## [0.1.0] (2022-10-26) + +Initial release of metadata being it's own module. diff --git a/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/LICENSE b/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/README.md b/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/README.md new file mode 100644 index 000000000..f940fb2c8 --- /dev/null +++ b/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/README.md @@ -0,0 +1,27 @@ +# Compute API + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata) + +This is a utility library for communicating with the Google Cloud metadata service +on Google Cloud. + +## Install + +```bash +go get cloud.google.com/go/compute/metadata +``` + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details.
+ +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/metadata.go b/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/metadata.go index 545bd9d37..c17faa142 100644 --- a/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -16,7 +16,7 @@ // metadata and API service accounts. // // This package is a wrapper around the GCE metadata service, -// as documented at https://developers.google.com/compute/docs/metadata. +// as documented at https://cloud.google.com/compute/docs/metadata/overview. package metadata // import "cloud.google.com/go/compute/metadata" import ( @@ -61,14 +61,20 @@ var ( instID = &cachedValue{k: "instance/id", trim: true} ) -var defaultClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - }, -}} +var defaultClient = &Client{hc: newDefaultHTTPClient()} + +func newDefaultHTTPClient() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + IdleConnTimeout: 60 * time.Second, + }, + Timeout: 5 * time.Second, + } +} // NotDefinedError is returned when requested metadata is not defined. // @@ -130,7 +136,7 @@ func testOnGCE() bool { go func() { req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) req.Header.Set("User-Agent", userAgent) - res, err := defaultClient.hc.Do(req.WithContext(ctx)) + res, err := newDefaultHTTPClient().Do(req.WithContext(ctx)) if err != nil { resc <- false return @@ -140,7 +146,8 @@ func testOnGCE() bool { }() go func() { - addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal") + resolver := &net.Resolver{} + addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") if err != nil || len(addrs) == 0 { resc <- false return @@ -282,6 +289,7 @@ func NewClient(c *http.Client) *Client { // getETag returns a value from the metadata service as well as the associated ETag. // This func is otherwise equivalent to Get. func (c *Client) getETag(suffix string) (value, etag string, err error) { + ctx := context.TODO() // Using a fixed IP makes it very difficult to spoof the metadata service in // a container, which is an important use-case for local testing of cloud // deployments. 
To enable spoofing of the metadata service, the environment @@ -304,9 +312,25 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { } req.Header.Set("Metadata-Flavor", "Google") req.Header.Set("User-Agent", userAgent) - res, err := c.hc.Do(req) - if err != nil { - return "", "", err + var res *http.Response + var reqErr error + retryer := newRetryer() + for { + res, reqErr = c.hc.Do(req) + var code int + if res != nil { + code = res.StatusCode + } + if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { + if err := sleep(ctx, delay); err != nil { + return "", "", err + } + continue + } + break + } + if reqErr != nil { + return "", "", reqErr } defer res.Body.Close() if res.StatusCode == http.StatusNotFound { diff --git a/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/retry.go b/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/retry.go new file mode 100644 index 000000000..0f18f3cda --- /dev/null +++ b/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/retry.go @@ -0,0 +1,114 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "context" + "io" + "math/rand" + "net/http" + "time" +) + +const ( + maxRetryAttempts = 5 +) + +var ( + syscallRetryable = func(err error) bool { return false } +) + +// defaultBackoff is basically equivalent to gax.Backoff without the need for +// the dependency. +type defaultBackoff struct { + max time.Duration + mul float64 + cur time.Duration +} + +func (b *defaultBackoff) Pause() time.Duration { + d := time.Duration(1 + rand.Int63n(int64(b.cur))) + b.cur = time.Duration(float64(b.cur) * b.mul) + if b.cur > b.max { + b.cur = b.max + } + return d +} + +// sleep is the equivalent of gax.Sleep without the need for the dependency. +func sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +func newRetryer() *metadataRetryer { + return &metadataRetryer{bo: &defaultBackoff{ + cur: 100 * time.Millisecond, + max: 30 * time.Second, + mul: 2, + }} +} + +type backoff interface { + Pause() time.Duration +} + +type metadataRetryer struct { + bo backoff + attempts int +} + +func (r *metadataRetryer) Retry(status int, err error) (time.Duration, bool) { + if status == http.StatusOK { + return 0, false + } + retryOk := shouldRetry(status, err) + if !retryOk { + return 0, false + } + if r.attempts == maxRetryAttempts { + return 0, false + } + r.attempts++ + return r.bo.Pause(), true +} + +func shouldRetry(status int, err error) bool { + if 500 <= status && status <= 599 { + return true + } + if err == io.ErrUnexpectedEOF { + return true + } + // Transient network errors should be retried. 
+ if syscallRetryable(err) { + return true + } + if err, ok := err.(interface{ Temporary() bool }); ok { + if err.Temporary() { + return true + } + } + if err, ok := err.(interface{ Unwrap() error }); ok { + return shouldRetry(status, err.Unwrap()) + } + return false +} diff --git a/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/retry_linux.go new file mode 100644 index 000000000..bb412f891 --- /dev/null +++ b/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/retry_linux.go @@ -0,0 +1,26 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux +// +build linux + +package metadata + +import "syscall" + +func init() { + // Initialize syscallRetryable to return true on transient socket-level + // errors. These errors are specific to Linux. + syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } +} diff --git a/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/tidyfix.go b/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/tidyfix.go new file mode 100644 index 000000000..4cef48500 --- /dev/null +++ b/prometheus-to-sd/vendor/cloud.google.com/go/compute/metadata/tidyfix.go @@ -0,0 +1,23 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the {{.RootMod}} import, won't actually become part of +// the resultant binary. +//go:build modhack +// +build modhack + +package metadata + +// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "cloud.google.com/go/compute/internal" diff --git a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/.travis.yml b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/.travis.yml deleted file mode 100644 index c516ea88d..000000000 --- a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - "1.x" - - master -env: - - TAGS="" - - TAGS="-tags purego" -script: go test $TAGS -v ./... 
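Reviewer note on the retry path vendored above: taken together, the getETag loop in metadata.go and retry.go implement capped exponential backoff with full jitter. There are at most five retries; each pause is drawn uniformly from (0, cur], and cur doubles after every pause up to a 30-second cap; a retry happens only for 5xx statuses, io.ErrUnexpectedEOF, retryable syscall errors, or errors reporting Temporary(). The following minimal sketch reproduces that loop outside the patch. The response sequence is hypothetical and stands in for the metadata server, and the backoff type merely mirrors defaultBackoff; it is not the vendored implementation.

```go
package main

import (
	"fmt"
	"math/rand"
	"net/http"
	"time"
)

const maxRetryAttempts = 5 // same cap as the vendored retryer

// backoff mirrors the vendored defaultBackoff: Pause returns a random
// duration in (0, cur], then doubles cur, capping it at max.
type backoff struct {
	cur, max time.Duration
	mul      float64
}

func (b *backoff) Pause() time.Duration {
	d := time.Duration(1 + rand.Int63n(int64(b.cur)))
	b.cur = time.Duration(float64(b.cur) * b.mul)
	if b.cur > b.max {
		b.cur = b.max
	}
	return d
}

func main() {
	// Hypothetical responses: two transient 503s, then success.
	statuses := []int{http.StatusServiceUnavailable, http.StatusServiceUnavailable, http.StatusOK}

	bo := &backoff{cur: 100 * time.Millisecond, max: 30 * time.Second, mul: 2}
	for attempt, code := range statuses {
		// shouldRetry above also treats io.ErrUnexpectedEOF, retryable
		// syscall errors, and Temporary() errors as transient; only the
		// 5xx status check is modeled here.
		retryable := code >= 500 && code <= 599
		if !retryable || attempt == maxRetryAttempts {
			fmt.Printf("finished on attempt %d with status %d\n", attempt+1, code)
			return
		}
		delay := bo.Pause()
		fmt.Printf("attempt %d got %d; retrying in %v\n", attempt+1, code, delay)
		time.Sleep(delay)
	}
}
```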
diff --git a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/README.md b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/README.md index 2fd8693c2..8bf0e5b78 100644 --- a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/README.md +++ b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/README.md @@ -1,10 +1,9 @@ # xxhash -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) +[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) +[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ ## Compatibility @@ -45,23 +47,26 @@ I recommend using the latest release of Go. Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package - [InfluxDB](https://github.com/influxdata/influxdb) - [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/testall.sh b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 000000000..94b9c4439 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. 
This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash.go b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash.go index db0b35fbe..a9e0d45c9 100644 --- a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -16,19 +16,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array for the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -50,10 +42,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block.
- copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 @@ -193,7 +186,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error { b, d.v4 = consumeUint64(b) b, d.total = consumeUint64(b) copy(d.mem[:], b) - b = b[len(d.mem):] d.n = int(d.total % uint64(len(d.mem))) return nil } diff --git a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index d580e32ae..3e8b13257 100644 --- a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -1,215 +1,209 @@ +//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. 
- MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. - MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. - SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. 
- MOVQ b_base+8(FP), CX - MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. - MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is CX minus the old base pointer. - SUBQ b_base+8(FP), CX - MOVQ CX, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 000000000..7e3145a22 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. +#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. 
+ ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go similarity index 73% rename from prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go rename to prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index ad14b807f..9216e0a40 100644 --- a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -1,3 +1,5 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 // +build !appengine // +build gc // +build !purego diff --git a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 4a5a82160..26df13bba 100644 --- a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego package xxhash @@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index fc9bea7a3..e86f1b5fd 100644 --- 
a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine // This file contains the safe implementations of otherwise unsafe-using code. diff --git a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 53bf76efb..1c1638fd8 100644 --- a/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/prometheus-to-sd/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -1,3 +1,4 @@ +//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. @@ -6,41 +7,52 @@ package xxhash import ( - "reflect" "unsafe" ) -// Notes: +// In the future it's possible that compiler optimizations will make these +// XxxString functions unnecessary by realizing that calls such as +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. +// If that happens, even if we keep these functions they can be replaced with +// the trivial safe code. + +// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: // -// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ -// for some discussion about these unsafe conversions. +// var b []byte +// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) +// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data +// bh.Len = len(s) +// bh.Cap = len(s) // -// In the future it's possible that compiler optimizations will make these -// unsafe operations unnecessary: https://golang.org/issue/2205. +// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough +// weight to this sequence of expressions that any function that uses it will +// not be inlined. Instead, the functions below use a different unsafe +// conversion designed to minimize the inliner weight and allow both to be +// inlined. There is also a test (TestInlining) which verifies that these are +// inlined. // -// Both of these wrapper functions still incur function call overhead since they -// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write -// for strings to squeeze out a bit more speed. Mid-stack inlining should -// eventually fix this. +// See https://github.com/golang/go/issues/42739 for discussion. // Sum64String computes the 64-bit xxHash digest of s. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) + b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) return Sum64(b) } // WriteString adds more data to d. It always returns len(s), nil. // It may be faster than Write([]byte(s)) by avoiding a copy. func (d *Digest) WriteString(s string) (n int, err error) { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return d.Write(b) + d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))) + // d.Write always returns len(s), nil. + // Ignoring the return output and returning these fixed values buys a + // savings of 6 in the inliner's cost model. + return len(s), nil +} + +// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout +// of the first two words is the same as the layout of a string. 
+type sliceHeader struct {
+	s   string
+	cap int
 }
diff --git a/prometheus-to-sd/vendor/github.com/golang/glog/README b/prometheus-to-sd/vendor/github.com/golang/glog/README
deleted file mode 100644
index 387b4eb68..000000000
--- a/prometheus-to-sd/vendor/github.com/golang/glog/README
+++ /dev/null
@@ -1,44 +0,0 @@
-glog
-====
-
-Leveled execution logs for Go.
-
-This is an efficient pure Go implementation of leveled logs in the
-manner of the open source C++ package
-	https://github.com/google/glog
-
-By binding methods to booleans it is possible to use the log package
-without paying the expense of evaluating the arguments to the log.
-Through the -vmodule flag, the package also provides fine-grained
-control over logging at the file level.
-
-The comment from glog.go introduces the ideas:
-
-	Package glog implements logging analogous to the Google-internal
-	C++ INFO/ERROR/V setup. It provides functions Info, Warning,
-	Error, Fatal, plus formatting variants such as Infof. It
-	also provides V-style logging controlled by the -v and
-	-vmodule=file=2 flags.
-
-	Basic examples:
-
-		glog.Info("Prepare to repel boarders")
-
-		glog.Fatalf("Initialization failed: %s", err)
-
-	See the documentation for the V function for an explanation
-	of these examples:
-
-		if glog.V(2) {
-			glog.Info("Starting transaction...")
-		}
-
-		glog.V(2).Infoln("Processed", nItems, "elements")
-
-
-The repository contains an open source version of the log package
-used inside Google. The master copy of the source lives inside
-Google, not here. The code in this repo is for export only and is not itself
-under development. Feature requests will be ignored.
-
-Send bug reports to golang-nuts@googlegroups.com.
diff --git a/prometheus-to-sd/vendor/github.com/golang/glog/README.md b/prometheus-to-sd/vendor/github.com/golang/glog/README.md
new file mode 100644
index 000000000..a4f73883b
--- /dev/null
+++ b/prometheus-to-sd/vendor/github.com/golang/glog/README.md
@@ -0,0 +1,36 @@
+# glog
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/golang/glog)](https://pkg.go.dev/github.com/golang/glog)
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package [_glog_](https://github.com/google/glog).
+
+By binding methods to booleans it is possible to use the log package without paying the expense of evaluating the arguments to the log. Through the `-vmodule` flag, the package also provides fine-grained
+control over logging at the file level.
+
+The comment from `glog.go` introduces the ideas:
+
+Package _glog_ implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. It provides the functions Info, Warning, Error, Fatal, plus formatting variants such as Infof. It also provides V-style logging controlled by the `-v` and `-vmodule=file=2` flags.
+
+Basic examples:
+
+```go
+glog.Info("Prepare to repel boarders")
+
+glog.Fatalf("Initialization failed: %s", err)
+```
+
+See the documentation for the V function for an explanation of these examples:
+
+```go
+if glog.V(2) {
+	glog.Info("Starting transaction...")
+}
+glog.V(2).Infoln("Processed", nItems, "elements")
+```
+
+The repository contains an open source version of the log package used inside Google. The master copy of the source lives inside Google, not here. The code in this repo is for export only and is not itself under development. Feature requests will be ignored.
+
+Send bug reports to golang-nuts@googlegroups.com.
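The xxhash_unsafe.go rewrite above exists so that strings can be hashed without an intermediate []byte copy while staying within the Go inliner's cost budget. A minimal usage sketch of the two fast paths it enables follows; package main and the sample input are illustrative only, and the import path is the module vendored above:

	package main

	import (
		"fmt"

		"github.com/cespare/xxhash/v2"
	)

	func main() {
		s := "hello, xxhash"

		// One-shot: Sum64String avoids the []byte(s) copy via the
		// sliceHeader conversion shown above.
		fmt.Printf("%016x\n", xxhash.Sum64String(s))

		// Streaming: WriteString feeds the same fast path. Per its doc
		// comment it always returns len(s), nil, so the results can be
		// ignored here.
		d := xxhash.New()
		d.WriteString(s)
		fmt.Printf("%016x\n", d.Sum64()) // same value as Sum64String(s)
	}

Both calls produce the same 64-bit digest; the streaming Digest form is the one backed by the writeBlocks assembly above.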
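The glog.go rewrite below keeps the V-logging contract that both versions of the README describe. A minimal sketch of the guarded pattern, where expensiveState is a hypothetical stand-in for an argument that is costly to compute, and glog itself registers the -v and -vmodule flags:

	package main

	import (
		"flag"

		"github.com/golang/glog"
	)

	// expensiveState stands in for work you only want to do when
	// verbose logging is actually enabled.
	func expensiveState() string {
		return "detailed state dump"
	}

	func main() {
		flag.Parse() // glog registers -v, -vmodule, -logtostderr, etc.
		defer glog.Flush()

		// Guarded form: expensiveState() runs only when -v=2 (or a
		// matching -vmodule pattern) is in effect.
		if glog.V(2) {
			glog.Infof("starting transaction: %s", expensiveState())
		}

		// Shorter form: the guard still applies, but the arguments are
		// evaluated even when the message is discarded.
		glog.V(2).Infoln("processed", 42, "elements")
	}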
diff --git a/prometheus-to-sd/vendor/github.com/golang/glog/glog.go b/prometheus-to-sd/vendor/github.com/golang/glog/glog.go
index 54bd7afdc..e108ae8b4 100644
--- a/prometheus-to-sd/vendor/github.com/golang/glog/glog.go
+++ b/prometheus-to-sd/vendor/github.com/golang/glog/glog.go
@@ -1,6 +1,6 @@
-// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+// Go support for leveled logs, analogous to https://github.com/google/glog.
 //
-// Copyright 2013 Google Inc. All Rights Reserved.
+// Copyright 2023 Google Inc. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -50,15 +50,15 @@
 //		Log files will be written to this directory instead of the
 //		default temporary directory.
 //
-//	Other flags provide aids to debugging.
+//	Other flags provide aids to debugging.
 //
 //	-log_backtrace_at=""
-//		When set to a file and line number holding a logging statement,
-//		such as
+//		A comma-separated list of file and line numbers holding a logging
+//		statement, such as
 //			-log_backtrace_at=gopherflakes.go:234
-//		a stack trace will be written to the Info log whenever execution
-//		hits that statement. (Unlike with -vmodule, the ".go" must be
-//		present.)
+//		A stack trace will be written to the Info log whenever execution
+//		hits one of these statements. (Unlike with -vmodule, the ".go"
+//		must be present.)
 //	-v=0
 //		Enable V-leveled logging at the specified level.
 //	-vmodule=""
@@ -66,100 +66,47 @@
 //		where pattern is a literal file name (minus the ".go" suffix) or
 //		"glob" pattern and N is a V level. For instance,
 //			-vmodule=gopher*=3
-//		sets the V level to 3 in all Go files whose names begin "gopher".
-//
+//		sets the V level to 3 in all Go files whose names begin with "gopher",
+//		and
+//			-vmodule=/path/to/glog/glog_test=1
+//		sets the V level to 1 in the Go file /path/to/glog/glog_test.go.
+//		If a glob pattern contains a slash, it is matched against the full path,
+//		and the file name. Otherwise, the pattern is
+//		matched only against the file's basename. When both -vmodule and -v
+//		are specified, the -vmodule values take precedence for the specified
+//		modules.
 package glog
+// This file contains the parts of the log package that are shared among all
+// implementations (file, envelope, and appengine).
+
 import (
-	"bufio"
 	"bytes"
 	"errors"
-	"flag"
 	"fmt"
-	"io"
 	stdLog "log"
 	"os"
-	"path/filepath"
+	"reflect"
 	"runtime"
+	"runtime/pprof"
 	"strconv"
-	"strings"
 	"sync"
 	"sync/atomic"
+	"syscall"
 	"time"
-)
-
-// severity identifies the sort of log: info, warning etc. It also implements
-// the flag.Value interface. The -stderrthreshold flag is of type severity and
-// should be modified only through the flag.Value interface. The values match
-// the corresponding constants in C++.
-type severity int32 // sync/atomic int32
-// These constants identify the log levels in order of increasing severity.
-// A message written to a high-severity log file is also written to each
-// lower-severity log file.
-const (
-	infoLog severity = iota
-	warningLog
-	errorLog
-	fatalLog
-	numSeverity = 4
+
+	"github.com/golang/glog/internal/logsink"
+	"github.com/golang/glog/internal/stackdump"
 )

-const severityChar = "IWEF"
-
-var severityName = []string{
-	infoLog:    "INFO",
-	warningLog: "WARNING",
-	errorLog:   "ERROR",
-	fatalLog:   "FATAL",
-}
-
-// get returns the value of the severity.
-func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} +var timeNow = time.Now // Stubbed out for testing. -// Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? - if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} +// ErrNoLog is the error we return if no log file has yet been created +// for the specified log type. +var ErrNoLog = errors.New("log file not yet created") // OutputStats tracks the number of output lines and bytes written. type OutputStats struct { @@ -183,724 +130,99 @@ var Stats struct { Info, Warning, Error OutputStats } -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, +var severityStats = [...]*OutputStats{ + logsink.Info: &Stats.Info, + logsink.Warning: &Stats.Warning, + logsink.Error: &Stats.Error, + logsink.Fatal: nil, } -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. -// It's a distinct type because we want to discriminate it from logType. -// Variables of type level are only changed under logging.mu. -// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. *Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. +// Level specifies a level of verbosity for V logs. The -v flag is of type +// Level and should be modified only through the flag.Value interface. type Level int32 -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. -func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} +var metaPool sync.Pool // Pool of *logsink.Meta. -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. 
-// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - level Level -} - -// match reports whether the file matches the pattern. It uses a string -// comparison if the pattern contains no metacharacters. -func (m *modulePat) match(file string) bool { - if m.literal { - return file == m.pattern - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) +// metaPoolGet returns a *logsink.Meta from metaPool as both an interface and a +// pointer, allocating a new one if necessary. (Returning the interface value +// directly avoids an allocation if there was an existing pointer in the pool.) +func metaPoolGet() (any, *logsink.Meta) { + if metai := metaPool.Get(); metai != nil { + return metai, metai.(*logsink.Meta) } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported. -func (m *moduleSpec) Get() interface{} { - return nil + meta := new(logsink.Meta) + return meta, meta } -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? - filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil -} - -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. -func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} - -// isSet reports whether the trace location has been specified. -// logging.mu is held. -func (t *traceLocation) isSet() bool { - return t.line > 0 -} +type stack bool -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. -func (t *traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *traceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. 
- logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *traceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *traceLocation) Set(value string) error { - if value == "" { - // Unset. - t.line = 0 - t.file = "" - } - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -func init() { - flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") - flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - - logging.setVState(0, nil, false) - go logging.flushDaemon() -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. -type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. - - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex - // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. 
- filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation traceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. -// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. - if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. - // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. +const ( + noStack = stack(false) + withStack = stack(true) +) -/* -header formats a log header as defined by the C++ implementation. -It returns a buffer containing the formatted header and the user's file and line number. -The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) +func appendBacktrace(depth int, format string, args []any) (string, []any) { + // Capture a backtrace as a stackdump.Stack (both text and PC slice). + // Structured log sinks can extract the backtrace in whichever format they + // prefer (PCs or text), and Text sinks will include it as just another part + // of the log message. + // + // Use depth instead of depth+1 so that the backtrace always includes the + // log function itself - otherwise the reason for the trace appearing in the + // log may not be obvious to the reader. + dump := stackdump.Caller(depth) + + // Add an arg and an entry in the format string for the stack dump. 
+ // + // Copy the "args" slice to avoid a rare but serious aliasing bug + // (corrupting the caller's slice if they passed it to a non-Fatal call + // using "..."). + format = format + "\n\n%v\n" + args = append(append([]any(nil), args...), dump) + + return format, args +} + +// logf writes a log message for a log function call (or log function wrapper) +// at the given depth in the current goroutine's stack. +func logf(depth int, severity logsink.Severity, verbose bool, stack stack, format string, args ...any) { + now := timeNow() + _, file, line, ok := runtime.Caller(depth + 1) if !ok { file = "???" line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } } - return l.formatHeader(s, file, line), file, line -} -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits + if stack == withStack || backtraceAt(file, line) { + format, args = appendBacktrace(depth+1, format, args) } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } + metai, meta := metaPoolGet() + *meta = logsink.Meta{ + Time: now, + File: file, + Line: line, + Depth: depth + 1, + Severity: severity, + Verbose: verbose, + Thread: int64(pid), } - return copy(buf.tmp[i:], buf.tmp[j:]) + sinkf(meta, format, args...) + metaPool.Put(metai) } -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) - l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) 
-} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printf(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if !flag.Parsed() { - os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) - os.Stderr.Write(data) - } else if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) - } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. - if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. - } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { +func sinkf(meta *logsink.Meta, format string, args ...any) { + meta.Depth++ + n, err := logsink.Printf(meta, format, args...) + if stats := severityStats[meta.Severity]; stats != nil { atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) - } -} - -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. 
This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. -var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. -func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. - if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. -type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - sb.logger.exit(err) - } + atomic.AddInt64(&stats.bytes, int64(n)) } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} -// rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now) - sb.nbytes = 0 if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - // Write header. - var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. 
-const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - } - if err := sb.rotateFile(now); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 30 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for _ = range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. -func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } + logsink.Printf(meta, "glog: exiting because of error: %s", err) + sinks.file.Flush() + os.Exit(2) } } @@ -912,9 +234,9 @@ func (l *loggingT) flushAll() { // Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not // recognized, CopyStandardLogTo panics. func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + sev, err := logsink.ParseSeverity(name) + if err != nil { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): %v", name, err)) } // Set a log format that captures the user's file and line: // d.go:23: message @@ -922,9 +244,22 @@ func CopyStandardLogTo(name string) { stdLog.SetOutput(logBridge(sev)) } +// NewStandardLogger returns a Logger that writes to the Google logs for the +// named and lower severities. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, NewStandardLogger panics. +func NewStandardLogger(name string) *stdLog.Logger { + sev, err := logsink.ParseSeverity(name) + if err != nil { + panic(fmt.Sprintf("log.NewStandardLogger(%q): %v", name, err)) + } + return stdLog.New(logBridge(sev), "", stdLog.Lshortfile) +} + // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. -type logBridge severity +type logBridge logsink.Severity // Write parses the standard logging line and passes its components to the // logger for severity(lb). @@ -946,36 +281,72 @@ func (lb logBridge) Write(b []byte) (n int, err error) { line = 1 } } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) + + // The depth below hard-codes details of how stdlog gets here. The alternative would be to walk + // up the stack looking for src/log/log.go but that seems like it would be + // unfortunately slow. 
+ const stdLogDepth = 4 + + metai, meta := metaPoolGet() + *meta = logsink.Meta{ + Time: timeNow(), + File: file, + Line: line, + Depth: stdLogDepth, + Severity: logsink.Severity(lb), + Thread: int64(pid), + } + + format := "%s" + args := []any{text} + if backtraceAt(file, line) { + format, args = appendBacktrace(meta.Depth, format, args) + } + + sinkf(meta, format, args...) + metaPool.Put(metai) + return len(b), nil } -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] +// defaultFormat returns a fmt.Printf format specifier that formats its +// arguments as if they were passed to fmt.Print. +func defaultFormat(args []any) string { + n := len(args) + switch n { + case 0: + return "" + case 1: + return "%v" + } + + b := make([]byte, 0, n*3-1) + wasString := true // Suppress leading space. + for _, arg := range args { + isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String + if wasString || isString { + b = append(b, "%v"...) + } else { + b = append(b, " %v"...) + } + wasString = isString } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] + return string(b) +} + +// lnFormat returns a fmt.Printf format specifier that formats its arguments +// as if they were passed to fmt.Println. +func lnFormat(args []any) string { + if len(args) == 0 { + return "\n" } - for _, filter := range l.vmodule.filter { - if filter.match(file) { - l.vmap[pc] = filter.level - return filter.level - } + + b := make([]byte, 0, len(args)*3) + for range args { + b = append(b, "%v "...) } - l.vmap[pc] = 0 - return 0 + b[len(b)-1] = '\n' // Replace the last space with a newline. + return string(b) } // Verbose is a boolean type that implements Infof (like Printf) etc. @@ -986,195 +357,265 @@ type Verbose bool // The returned value is a boolean of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either +// // if glog.V(2) { glog.Info("log this") } +// // or +// // glog.V(2).Info("log this") +// // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // // Whether an individual call to V generates a log record depends on the setting of // the -v and --vmodule flags; both are off by default. If the level in the call to -// V is at least the value of -v, or of -vmodule for the source file containing the +// V is at most the value of -v, or of -vmodule for the source file containing the // call, the V call will log. func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. + return VDepth(1, level) +} - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } +// VDepth acts as V but uses depth to determine which call frame to check vmodule for. +// VDepth(0, level) is the same as V(level). 
+func VDepth(depth int, level Level) Verbose { + return Verbose(verboseEnabled(depth+1, level)) +} - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. - if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. - logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...any) { + v.InfoDepth(1, args...) +} + +// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoDepth(depth int, args ...any) { + if v { + logf(depth+1, logsink.Info, true, noStack, defaultFormat(args), args...) } - return Verbose(false) } -// Info is equivalent to the global Info function, guarded by the value of v. +// InfoDepthf is equivalent to the global InfoDepthf function, guarded by the value of v. // See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { +func (v Verbose) InfoDepthf(depth int, format string, args ...any) { if v { - logging.print(infoLog, args...) + logf(depth+1, logsink.Info, true, noStack, format, args...) } } // Infoln is equivalent to the global Infoln function, guarded by the value of v. // See the documentation of V for usage. -func (v Verbose) Infoln(args ...interface{}) { +func (v Verbose) Infoln(args ...any) { if v { - logging.println(infoLog, args...) + logf(1, logsink.Info, true, noStack, lnFormat(args), args...) } } // Infof is equivalent to the global Infof function, guarded by the value of v. // See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { +func (v Verbose) Infof(format string, args ...any) { if v { - logging.printf(infoLog, format, args...) + logf(1, logsink.Info, true, noStack, format, args...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...interface{}) { - logging.print(infoLog, args...) +func Info(args ...any) { + InfoDepth(1, args...) +} + +// InfoDepth calls Info from a different depth in the call stack. +// This enables a callee to emit logs that use the callsite information of its caller +// or any other callers in the stack. When depth == 0, the original callee's line +// information is emitted. When depth > 0, depth frames are skipped in the call stack +// and the final frame is treated like the original callee to Info. +func InfoDepth(depth int, args ...any) { + logf(depth+1, logsink.Info, false, noStack, defaultFormat(args), args...) } -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) +// InfoDepthf acts as InfoDepth but with format string. +func InfoDepthf(depth int, format string, args ...any) { + logf(depth+1, logsink.Info, false, noStack, format, args...) } // Infoln logs to the INFO log. 
// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Infoln(args ...interface{}) { - logging.println(infoLog, args...) +func Infoln(args ...any) { + logf(1, logsink.Info, false, noStack, lnFormat(args), args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) +func Infof(format string, args ...any) { + logf(1, logsink.Info, false, noStack, format, args...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) +func Warning(args ...any) { + WarningDepth(1, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) +func WarningDepth(depth int, args ...any) { + logf(depth+1, logsink.Warning, false, noStack, defaultFormat(args), args...) +} + +// WarningDepthf acts as Warningf but uses depth to determine which call frame to log. +// WarningDepthf(0, "msg") is the same as Warningf("msg"). +func WarningDepthf(depth int, format string, args ...any) { + logf(depth+1, logsink.Warning, false, noStack, format, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Warningln(args ...interface{}) { - logging.println(warningLog, args...) +func Warningln(args ...any) { + logf(1, logsink.Warning, false, noStack, lnFormat(args), args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) +func Warningf(format string, args ...any) { + logf(1, logsink.Warning, false, noStack, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) +func Error(args ...any) { + ErrorDepth(1, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) +func ErrorDepth(depth int, args ...any) { + logf(depth+1, logsink.Error, false, noStack, defaultFormat(args), args...) +} + +// ErrorDepthf acts as Errorf but uses depth to determine which call frame to log. +// ErrorDepthf(0, "msg") is the same as Errorf("msg"). +func ErrorDepthf(depth int, format string, args ...any) { + logf(depth+1, logsink.Error, false, noStack, format, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) +func Errorln(args ...any) { + logf(1, logsink.Error, false, noStack, lnFormat(args), args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. 
-func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) +func Errorf(format string, args ...any) { + logf(1, logsink.Error, false, noStack, format, args...) +} + +func fatalf(depth int, format string, args ...any) { + logf(depth+1, logsink.Fatal, false, withStack, format, args...) + sinks.file.Flush() + + err := abortProcess() // Should not return. + + // Failed to abort the process using signals. Dump a stack trace and exit. + Errorf("abortProcess returned unexpectedly: %v", err) + sinks.file.Flush() + pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) + os.Exit(2) // Exit with the same code as the default SIGABRT handler. +} + +// abortProcess attempts to kill the current process in a way that will dump the +// currently-running goroutines someplace useful (Coroner or stderr). +// +// It does this by sending SIGABRT to the current process. Unfortunately, the +// signal may or may not be delivered to the current thread; in order to do that +// portably, we would need to add a cgo dependency and call pthread_kill. +// +// If successful, abortProcess does not return. +func abortProcess() error { + p, err := os.FindProcess(os.Getpid()) + if err != nil { + return err + } + if err := p.Signal(syscall.SIGABRT); err != nil { + return err + } + + // Sent the signal. Now we wait for it to arrive and any SIGABRT handlers to + // run (and eventually terminate the process themselves). + // + // We could just "select{}" here, but there's an outside chance that would + // trigger the runtime's deadlock detector if there happen not to be any + // background goroutines running. So we'll sleep a while first to give + // the signal some time. + time.Sleep(10 * time.Second) + select {} } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls os.Exit(2). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) +func Fatal(args ...any) { + FatalDepth(1, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). -func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) +func FatalDepth(depth int, args ...any) { + fatalf(depth+1, defaultFormat(args), args...) +} + +// FatalDepthf acts as Fatalf but uses depth to determine which call frame to log. +// FatalDepthf(0, "msg") is the same as Fatalf("msg"). +func FatalDepthf(depth int, format string, args ...any) { + fatalf(depth+1, format, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls os.Exit(2). // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) +func Fatalln(args ...any) { + fatalf(1, lnFormat(args), args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls os.Exit(2). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. 
-func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) +func Fatalf(format string, args ...any) { + fatalf(1, format, args...) } -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. -// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 +func exitf(depth int, format string, args ...any) { + logf(depth+1, logsink.Fatal, false, noStack, format, args...) + sinks.file.Flush() + os.Exit(1) +} // Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) +func Exit(args ...any) { + ExitDepth(1, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) +func ExitDepth(depth int, args ...any) { + exitf(depth+1, defaultFormat(args), args...) +} + +// ExitDepthf acts as Exitf but uses depth to determine which call frame to log. +// ExitDepthf(0, "msg") is the same as Exitf("msg"). +func ExitDepthf(depth int, format string, args ...any) { + exitf(depth+1, format, args...) } // Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) +func Exitln(args ...any) { + exitf(1, lnFormat(args), args...) } // Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) +func Exitf(format string, args ...any) { + exitf(1, format, args...) } diff --git a/prometheus-to-sd/vendor/github.com/golang/glog/glog_file.go b/prometheus-to-sd/vendor/github.com/golang/glog/glog_file.go index 65075d281..af1c934b8 100644 --- a/prometheus-to-sd/vendor/github.com/golang/glog/glog_file.go +++ b/prometheus-to-sd/vendor/github.com/golang/glog/glog_file.go @@ -1,6 +1,6 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// Go support for leveled logs, analogous to https://github.com/google/glog. // -// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2023 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,26 +19,34 @@ package glog import ( + "bufio" + "bytes" "errors" "flag" "fmt" + "io" "os" "os/user" "path/filepath" + "runtime" "strings" "sync" "time" -) -// MaxSize is the maximum size of a log file in bytes. -var MaxSize uint64 = 1024 * 1024 * 1800 + "github.com/golang/glog/internal/logsink" +) // logDirs lists the candidate directories for new log files. var logDirs []string -// If non-empty, overrides the choice of directory in which to write logs. -// See createLogDirs for the full list of possible destinations. -var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") +var ( + // If non-empty, overrides the choice of directory in which to write logs. + // See createLogDirs for the full list of possible destinations. 
+ logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") + logLink = flag.String("log_link", "", "If non-empty, add symbolic links in this directory to the log files") + logBufLevel = flag.Int("logbuflevel", int(logsink.Info), "Buffer log messages logged at this level or lower"+ + " (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.") +) func createLogDirs() { if *logDir != "" { @@ -64,9 +72,17 @@ func init() { if err == nil { userName = current.Username } - - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) + // Sanitize userName since it is used to construct file paths. + userName = strings.Map(func(r rune) rune { + switch { + case r >= 'a' && r <= 'z': + case r >= 'A' && r <= 'Z': + case r >= '0' && r <= '9': + default: + return '_' + } + return r + }, userName) } // shortHostname returns its argument, truncating at the first period. @@ -122,3 +138,270 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) { } return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) } + +// flushSyncWriter is the interface satisfied by logging destinations. +type flushSyncWriter interface { + Flush() error + Sync() error + io.Writer + filenames() []string +} + +var sinks struct { + stderr stderrSink + file fileSink +} + +func init() { + sinks.stderr.w = os.Stderr + + // Register stderr first: that way if we crash during file-writing at least + // the log will have gone somewhere. + logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr, &sinks.file) + + sinks.file.flushChan = make(chan logsink.Severity, 1) + go sinks.file.flushDaemon() +} + +// stderrSink is a logsink.Text that writes log entries to stderr +// if they meet certain conditions. +type stderrSink struct { + mu sync.Mutex + w io.Writer +} + +// Enabled implements logsink.Text.Enabled. It returns true if any of the +// various stderr flags are enabled for logs of the given severity, if the log +// message is from the standard "log" package, or if google.Init has not yet run +// (and hence file logging is not yet initialized). +func (s *stderrSink) Enabled(m *logsink.Meta) bool { + return toStderr || alsoToStderr || m.Severity >= stderrThreshold.get() +} + +// Emit implements logsink.Text.Emit. +func (s *stderrSink) Emit(m *logsink.Meta, data []byte) (n int, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + dn, err := s.w.Write(data) + n += dn + return n, err +} + +// severityWriters is an array of flushSyncWriter with a value for each +// logsink.Severity. +type severityWriters [4]flushSyncWriter + +// fileSink is a logsink.Text that prints to a set of Google log files. +type fileSink struct { + mu sync.Mutex + // file holds writer for each of the log types. + file severityWriters + flushChan chan logsink.Severity +} + +// Enabled implements logsink.Text.Enabled. It returns true if google.Init +// has run and both --disable_log_to_disk and --logtostderr are false. +func (s *fileSink) Enabled(m *logsink.Meta) bool { + return !toStderr +} + +// Emit implements logsink.Text.Emit +func (s *fileSink) Emit(m *logsink.Meta, data []byte) (n int, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + if err = s.createMissingFiles(m.Severity); err != nil { + return 0, err + } + for sev := m.Severity; sev >= logsink.Info; sev-- { + if _, fErr := s.file[sev].Write(data); fErr != nil && err == nil { + err = fErr // Take the first error. 
+ } + } + n = len(data) + if int(m.Severity) > *logBufLevel { + select { + case s.flushChan <- m.Severity: + default: + } + } + + return n, err +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// s.mu is held for all its methods. +type syncBuffer struct { + sink *fileSink + *bufio.Writer + file *os.File + names []string + sev logsink.Severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + return 0, err + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + return n, err +} + +func (sb *syncBuffer) filenames() []string { + return sb.names +} + +const footer = "\nCONTINUED IN NEXT FILE\n" + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + var err error + pn := "" + file, name, err := create(sb.sev.String(), now) + + if sb.file != nil { + // The current log file becomes the previous log at the end of + // this block, so save its name for use in the header of the next + // file. + pn = sb.file.Name() + sb.Flush() + // If there's an existing file, write a footer with the name of + // the next file in the chain, followed by the constant string + // \nCONTINUED IN NEXT FILE\n to make continuation detection simple. + sb.file.Write([]byte("Next log: ")) + sb.file.Write([]byte(name)) + sb.file.Write([]byte(footer)) + sb.file.Close() + } + + sb.file = file + sb.names = append(sb.names, name) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. + var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Previous log: %s\n", pn) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createMissingFiles creates all the log files for severity from infoLog up to +// upTo that have not already been created. +// s.mu is held. +func (s *fileSink) createMissingFiles(upTo logsink.Severity) error { + if s.file[upTo] != nil { + return nil + } + now := time.Now() + // Files are created in increasing severity order, so we can be assured that + // if a high severity logfile exists, then so do all of lower severity. + for sev := logsink.Info; sev <= upTo; sev++ { + if s.file[sev] != nil { + continue + } + sb := &syncBuffer{ + sink: s, + sev: sev, + } + if err := sb.rotateFile(now); err != nil { + return err + } + s.file[sev] = sb + } + return nil +} + +// flushDaemon periodically flushes the log file buffers. 
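The syncBuffer.Write path above rotates the log file once MaxSize bytes have been written. A sketch of how a consumer might lower that threshold, assuming glog keeps exporting the MaxSize variable (this diff moves it out of glog_file.go rather than deleting it):

```go
package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// Rotate log files at 64 MiB instead of the ~1.8 GB default. The
	// threshold is consulted on every Write (see syncBuffer.Write above).
	glog.MaxSize = 64 << 20

	flag.Parse()
	defer glog.Flush()
	glog.Info("rotation threshold lowered")
}
```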
+func (s *fileSink) flushDaemon() { + tick := time.NewTicker(30 * time.Second) + defer tick.Stop() + for { + select { + case <-tick.C: + s.Flush() + case sev := <-s.flushChan: + s.flush(sev) + } + } +} + +// Flush flushes all pending log I/O. +func Flush() { + sinks.file.Flush() +} + +// Flush flushes all the logs and attempts to "sync" their data to disk. +func (s *fileSink) Flush() error { + return s.flush(logsink.Info) +} + +// flush flushes all logs of severity threshold or greater. +func (s *fileSink) flush(threshold logsink.Severity) error { + s.mu.Lock() + defer s.mu.Unlock() + + var firstErr error + updateErr := func(err error) { + if err != nil && firstErr == nil { + firstErr = err + } + } + + // Flush from fatal down, in case there's trouble flushing. + for sev := logsink.Fatal; sev >= threshold; sev-- { + file := s.file[sev] + if file != nil { + updateErr(file.Flush()) + updateErr(file.Sync()) + } + } + + return firstErr +} + +// Names returns the names of the log files holding the FATAL, ERROR, +// WARNING, or INFO logs. Returns ErrNoLog if the log for the given +// level doesn't exist (e.g. because no messages of that level have been +// written). This may return multiple names if the log type requested +// has rolled over. +func Names(s string) ([]string, error) { + severity, err := logsink.ParseSeverity(s) + if err != nil { + return nil, err + } + + sinks.file.mu.Lock() + defer sinks.file.mu.Unlock() + f := sinks.file.file[severity] + if f == nil { + return nil, ErrNoLog + } + + return f.filenames(), nil +} diff --git a/prometheus-to-sd/vendor/github.com/golang/glog/glog_flags.go b/prometheus-to-sd/vendor/github.com/golang/glog/glog_flags.go new file mode 100644 index 000000000..3060e54d9 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/golang/glog/glog_flags.go @@ -0,0 +1,395 @@ +// Go support for leveled logs, analogous to https://github.com/google/glog. +// +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package glog + +import ( + "bytes" + "errors" + "flag" + "fmt" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + + "github.com/golang/glog/internal/logsink" +) + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + full bool // The pattern wants to match the full path + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. 
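Flush and Names, defined above, are the exported entry points into the file sink. A small sketch of both, assuming file logging is active (i.e. -logtostderr is unset):

```go
package main

import (
	"flag"
	"fmt"

	"github.com/golang/glog"
)

func main() {
	flag.Parse()
	glog.Info("hello") // ensure at least one INFO entry exists
	glog.Flush()       // push buffered entries through the file sink

	// Names returns every file backing the given severity; more than one
	// name means the log rolled over (see rotateFile earlier in this file).
	names, err := glog.Names("INFO")
	if err != nil {
		fmt.Println("no INFO log file:", err) // e.g. ErrNoLog under -logtostderr
		return
	}
	fmt.Println(names)
}
```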
+func (m *modulePat) match(full, file string) bool { + if m.literal { + if m.full { + return full == m.pattern + } + return file == m.pattern + } + if m.full { + match, _ := filepath.Match(m.pattern, full) + return match + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// isFull reports whether the pattern matches the full file path, that is, +// whether it contains /. +func isFull(pattern string) bool { + return strings.ContainsRune(pattern, '/') +} + +// verboseFlags represents the setting of the -v and -vmodule flags. +type verboseFlags struct { + // moduleLevelCache is a sync.Map storing the -vmodule Level for each V() + // call site, identified by PC. If there is no matching -vmodule filter, + // the cached value is exactly v. moduleLevelCache is replaced with a new + // Map whenever the -vmodule or -v flag changes state. + moduleLevelCache atomic.Value + + // mu guards all fields below. + mu sync.Mutex + + // v stores the value of the -v flag. It may be read safely using + // sync.LoadInt32, but is only modified under mu. + v Level + + // module stores the parsed -vmodule flag. + module []modulePat + + // moduleLength caches len(module). If greater than zero, it + // means vmodule is enabled. It may be read safely using sync.LoadInt32, but + // is only modified under mu. + moduleLength int32 +} + +// NOTE: For compatibility with the open-sourced v1 version of this +// package (github.com/golang/glog) we need to retain that flag.Level +// implements the flag.Value interface. See also go/log-vs-glog. + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(l.Get().(Level)), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() any { + if l == &vflags.v { + // l is the value registered for the -v flag. + return Level(atomic.LoadInt32((*int32)(l))) + } + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + if l == &vflags.v { + // l is the value registered for the -v flag. + vflags.mu.Lock() + defer vflags.mu.Unlock() + vflags.moduleLevelCache.Store(&sync.Map{}) + atomic.StoreInt32((*int32)(l), int32(v)) + return nil + } + *l = Level(v) + return nil +} + +// vModuleFlag is the flag.Value for the --vmodule flag. +type vModuleFlag struct{ *verboseFlags } + +func (f vModuleFlag) String() string { + f.mu.Lock() + defer f.mu.Unlock() + + var b bytes.Buffer + for i, f := range f.module { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get returns nil for this flag type since the struct is not exported. +func (f vModuleFlag) Get() any { return nil } + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,foo/bar/baz=1,gfs*=3 +func (f vModuleFlag) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. 
+ continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), isFull(pattern), Level(v)}) + } + + f.mu.Lock() + defer f.mu.Unlock() + f.module = filter + atomic.StoreInt32((*int32)(&f.moduleLength), int32(len(f.module))) + f.moduleLevelCache.Store(&sync.Map{}) + return nil +} + +func (f *verboseFlags) levelForPC(pc uintptr) Level { + if level, ok := f.moduleLevelCache.Load().(*sync.Map).Load(pc); ok { + return level.(Level) + } + + f.mu.Lock() + defer f.mu.Unlock() + level := Level(f.v) + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d for + // regular matches, /a/b/c/d for full matches. + if strings.HasSuffix(file, ".go") { + file = file[:len(file)-3] + } + full := file + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range f.module { + if filter.match(full, file) { + level = filter.level + break // Use the first matching level. + } + } + f.moduleLevelCache.Load().(*sync.Map).Store(pc, level) + return level +} + +func (f *verboseFlags) enabled(callerDepth int, level Level) bool { + if atomic.LoadInt32(&f.moduleLength) == 0 { + // No vmodule values specified, so compare against v level. + return Level(atomic.LoadInt32((*int32)(&f.v))) >= level + } + + pcs := [1]uintptr{} + if runtime.Callers(callerDepth+2, pcs[:]) < 1 { + return false + } + frame, _ := runtime.CallersFrames(pcs[:]).Next() + return f.levelForPC(frame.Entry) >= level +} + +// traceLocation represents an entry in the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +func parseTraceLocation(value string) (traceLocation, error) { + fields := strings.Split(value, ":") + if len(fields) != 2 { + return traceLocation{}, errTraceSyntax + } + file, lineStr := fields[0], fields[1] + if !strings.Contains(file, ".") { + return traceLocation{}, errTraceSyntax + } + line, err := strconv.Atoi(lineStr) + if err != nil { + return traceLocation{}, errTraceSyntax + } + if line < 0 { + return traceLocation{}, errors.New("negative value for line") + } + return traceLocation{file, line}, nil +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +func (t traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t traceLocation) String() string { + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// traceLocations represents the -log_backtrace_at flag. +// Syntax: -log_backtrace_at=recordio.go:234,sstable.go:456 +// Note that unlike vmodule the file extension is included here. +type traceLocations struct { + mu sync.Mutex + locsLen int32 // Safe for atomic read without mu. 
+ locs []traceLocation +} + +func (t *traceLocations) String() string { + t.mu.Lock() + defer t.mu.Unlock() + + var buf bytes.Buffer + for i, tl := range t.locs { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString(tl.String()) + } + return buf.String() +} + +// Get always returns nil for this flag type since the struct is not exported +func (t *traceLocations) Get() any { return nil } + +func (t *traceLocations) Set(value string) error { + var locs []traceLocation + for _, s := range strings.Split(value, ",") { + if s == "" { + continue + } + loc, err := parseTraceLocation(s) + if err != nil { + return err + } + locs = append(locs, loc) + } + + t.mu.Lock() + defer t.mu.Unlock() + atomic.StoreInt32(&t.locsLen, int32(len(locs))) + t.locs = locs + return nil +} + +func (t *traceLocations) match(file string, line int) bool { + if atomic.LoadInt32(&t.locsLen) == 0 { + return false + } + + t.mu.Lock() + defer t.mu.Unlock() + for _, tl := range t.locs { + if tl.match(file, line) { + return true + } + } + return false +} + +// severityFlag is an atomic flag.Value implementation for logsink.Severity. +type severityFlag int32 + +func (s *severityFlag) get() logsink.Severity { + return logsink.Severity(atomic.LoadInt32((*int32)(s))) +} +func (s *severityFlag) String() string { return strconv.FormatInt(int64(*s), 10) } +func (s *severityFlag) Get() any { return s.get() } +func (s *severityFlag) Set(value string) error { + threshold, err := logsink.ParseSeverity(value) + if err != nil { + // Not a severity name. Try a raw number. + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = logsink.Severity(v) + if threshold < logsink.Info || threshold > logsink.Fatal { + return fmt.Errorf("Severity %d out of range (min %d, max %d).", v, logsink.Info, logsink.Fatal) + } + } + atomic.StoreInt32((*int32)(s), int32(threshold)) + return nil +} + +var ( + vflags verboseFlags // The -v and -vmodule flags. + + logBacktraceAt traceLocations // The -log_backtrace_at flag. + + // Boolean flags. Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + stderrThreshold severityFlag // The -stderrthreshold flag. +) + +// verboseEnabled returns whether the caller at the given depth should emit +// verbose logs at the given level, with depth 0 identifying the caller of +// verboseEnabled. +func verboseEnabled(callerDepth int, level Level) bool { + return vflags.enabled(callerDepth+1, level) +} + +// backtraceAt returns whether the logging call at the given function and line +// should also emit a backtrace of the current call stack. 
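The Level, vModuleFlag, and severityFlag types above all implement flag.Value, so they can be driven through the standard flag package as well as from the command line. An illustrative sketch (the flag names are the real ones registered in the init function below; the fetcher* pattern and levels are made up):

```go
package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	flag.Parse()

	// Equivalent to -v=1 -vmodule=fetcher*=3 -stderrthreshold=WARNING on the
	// command line; flag.Set routes through the Level, vModuleFlag and
	// severityFlag Set methods defined in this file.
	flag.Set("v", "1")
	flag.Set("vmodule", "fetcher*=3")
	flag.Set("stderrthreshold", "WARNING")

	// vmodule matches per call site: in this file the effective level is 1.
	glog.V(3).Info("suppressed here; enabled in files matching fetcher*")
	glog.V(1).Info("emitted everywhere, since -v=1 needs no vmodule match")
}
```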
+func backtraceAt(file string, line int) bool { + return logBacktraceAt.match(file, line) +} + +func init() { + vflags.moduleLevelCache.Store(&sync.Map{}) + + flag.Var(&vflags.v, "v", "log level for V logs") + flag.Var(vModuleFlag{&vflags}, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + + flag.Var(&logBacktraceAt, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + stderrThreshold = severityFlag(logsink.Error) + + flag.BoolVar(&toStderr, "logtostderr", false, "log to standard error instead of files") + flag.BoolVar(&alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") + flag.Var(&stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") +} diff --git a/prometheus-to-sd/vendor/github.com/golang/glog/internal/logsink/logsink.go b/prometheus-to-sd/vendor/github.com/golang/glog/internal/logsink/logsink.go new file mode 100644 index 000000000..53758e1c9 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/golang/glog/internal/logsink/logsink.go @@ -0,0 +1,387 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logsink + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/golang/glog/internal/stackdump" +) + +// MaxLogMessageLen is the limit on length of a formatted log message, including +// the standard line prefix and trailing newline. +// +// Chosen to match C++ glog. +const MaxLogMessageLen = 15000 + +// A Severity is a severity at which a message can be logged. +type Severity int8 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + Info Severity = iota + Warning + Error + + // Fatal contains logs written immediately before the process terminates. + // + // Sink implementations should not terminate the process themselves: the log + // package will perform any necessary cleanup and terminate the process as + // appropriate. + Fatal +) + +func (s Severity) String() string { + switch s { + case Info: + return "INFO" + case Warning: + return "WARNING" + case Error: + return "ERROR" + case Fatal: + return "FATAL" + } + return fmt.Sprintf("%T(%d)", s, s) +} + +// ParseSeverity returns the case-insensitive Severity value for the given string. +func ParseSeverity(name string) (Severity, error) { + name = strings.ToUpper(name) + for s := Info; s <= Fatal; s++ { + if s.String() == name { + return s, nil + } + } + return -1, fmt.Errorf("logsink: invalid severity %q", name) +} + +// Meta is metadata about a logging call. +type Meta struct { + // Time is the time at which the log call was made. + Time time.Time + + // File is the source file from which the log entry originates. + File string + // Line is the line offset within the source file. + Line int + // Depth is the number of stack frames between the logsink and the log call. 
+ Depth int + + Severity Severity + + // Verbose indicates whether the call was made via "log.V". Log entries below + // the current verbosity threshold are not sent to the sink. + Verbose bool + + // Thread ID. This can be populated with a thread ID from another source, + // such as a system we are importing logs from. In the normal case, this + // will be set to the process ID (PID), since Go doesn't have threads. + Thread int64 + + // Stack trace starting in the logging function. May be nil. + // A logsink should implement the StackWanter interface to request this. + // + // Even if WantStack returns false, this field may be set (e.g. if another + // sink wants a stack trace). + Stack *stackdump.Stack +} + +// Structured is a logging destination that accepts structured data as input. +type Structured interface { + // Printf formats according to a fmt.Printf format specifier and writes a log + // entry. The precise result of formatting depends on the sink, but should + // aim for consistency with fmt.Printf. + // + // Printf returns the number of bytes occupied by the log entry, which + // may not be equal to the total number of bytes written. + // + // Printf returns any error encountered *if* it is severe enough that the log + // package should terminate the process. + // + // The sink must not modify the *Meta parameter, nor reference it after + // Printf has returned: it may be reused in subsequent calls. + Printf(meta *Meta, format string, a ...any) (n int, err error) +} + +// StackWanter can be implemented by a logsink.Structured to indicate that it +// wants a stack trace to accompany at least some of the log messages it receives. +type StackWanter interface { + // WantStack returns true if the sink requires a stack trace for a log message + // with this metadata. + // + // NOTE: Returning true implies that meta.Stack will be non-nil. Returning + // false does NOT imply that meta.Stack will be nil. + WantStack(meta *Meta) bool +} + +// Text is a logging destination that accepts pre-formatted log lines (instead of +// structured data). +type Text interface { + // Enabled returns whether this sink should output messages for the given + // Meta. If the sink returns false for a given Meta, the Printf function will + // not call Emit on it for the corresponding log message. + Enabled(*Meta) bool + + // Emit writes a pre-formatted text log entry (including any applicable + // header) to the log. It returns the number of bytes occupied by the entry + // (which may differ from the length of the passed-in slice). + // + // Emit returns any error encountered *if* it is severe enough that the log + // package should terminate the process. + // + // The sink must not modify the *Meta parameter, nor reference it after + // Printf has returned: it may be reused in subsequent calls. + // + // NOTE: When developing a text sink, keep in mind the surface in which the + // logs will be displayed, and whether it's important that the sink be + // resistant to tampering in the style of b/211428300. Standard text sinks + // (like `stderrSink`) do not protect against this (e.g. by escaping + // characters) because the cases where they would show user-influenced bytes + // are vanishingly small. + Emit(*Meta, []byte) (n int, err error) +} + +// bufs is a pool of *bytes.Buffer used in formatting log entries. +var bufs sync.Pool // Pool of *bytes.Buffer. + +// textPrintf formats a text log entry and emits it to all specified Text sinks. +// +// The returned n is the maximum across all Emit calls.
+// The returned err is the first non-nil error encountered. +// Sinks that are disabled by configuration should return (0, nil). +func textPrintf(m *Meta, textSinks []Text, format string, args ...any) (n int, err error) { + // We expect at most file, stderr, and perhaps syslog. If there are more, + // we'll end up allocating - no big deal. + const maxExpectedTextSinks = 3 + var noAllocSinks [maxExpectedTextSinks]Text + + sinks := noAllocSinks[:0] + for _, s := range textSinks { + if s.Enabled(m) { + sinks = append(sinks, s) + } + } + if len(sinks) == 0 && m.Severity != Fatal { + return 0, nil // No TextSinks specified; don't bother formatting. + } + + bufi := bufs.Get() + var buf *bytes.Buffer + if bufi == nil { + buf = bytes.NewBuffer(nil) + bufi = buf + } else { + buf = bufi.(*bytes.Buffer) + buf.Reset() + } + + // Lmmdd hh:mm:ss.uuuuuu PID/GID file:line] + // + // The "PID" entry arguably ought to be TID for consistency with other + // environments, but TID is not meaningful in a Go program due to the + // multiplexing of goroutines across threads. + // + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + const severityChar = "IWEF" + buf.WriteByte(severityChar[m.Severity]) + + _, month, day := m.Time.Date() + hour, minute, second := m.Time.Clock() + twoDigits(buf, int(month)) + twoDigits(buf, day) + buf.WriteByte(' ') + twoDigits(buf, hour) + buf.WriteByte(':') + twoDigits(buf, minute) + buf.WriteByte(':') + twoDigits(buf, second) + buf.WriteByte('.') + nDigits(buf, 6, uint64(m.Time.Nanosecond()/1000), '0') + buf.WriteByte(' ') + + nDigits(buf, 7, uint64(m.Thread), ' ') + buf.WriteByte(' ') + + { + file := m.File + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + buf.WriteString(file) + } + + buf.WriteByte(':') + { + var tmp [19]byte + buf.Write(strconv.AppendInt(tmp[:0], int64(m.Line), 10)) + } + buf.WriteString("] ") + + msgStart := buf.Len() + fmt.Fprintf(buf, format, args...) + if buf.Len() > MaxLogMessageLen-1 { + buf.Truncate(MaxLogMessageLen - 1) + } + msgEnd := buf.Len() + if b := buf.Bytes(); b[len(b)-1] != '\n' { + buf.WriteByte('\n') + } + + for _, s := range sinks { + sn, sErr := s.Emit(m, buf.Bytes()) + if sn > n { + n = sn + } + if sErr != nil && err == nil { + err = sErr + } + } + + if m.Severity == Fatal { + savedM := *m + fatalMessageStore(savedEntry{ + meta: &savedM, + msg: buf.Bytes()[msgStart:msgEnd], + }) + } else { + bufs.Put(bufi) + } + return n, err +} + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer to buf. +func twoDigits(buf *bytes.Buffer, d int) { + buf.WriteByte(digits[(d/10)%10]) + buf.WriteByte(digits[d%10]) +} + +// nDigits formats an n-digit integer to buf, padding with pad on the left. It +// assumes d != 0. +func nDigits(buf *bytes.Buffer, n int, d uint64, pad byte) { + var tmp [20]byte + + cutoff := len(tmp) - n + j := len(tmp) - 1 + for ; d > 0; j-- { + tmp[j] = digits[d%10] + d /= 10 + } + for ; j >= cutoff; j-- { + tmp[j] = pad + } + j++ + buf.Write(tmp[j:]) +} + +// Printf writes a log entry to all registered TextSinks in this package, then +// to all registered StructuredSinks. +// +// The returned n is the maximum across all Emit and Printf calls. +// The returned err is the first non-nil error encountered. +// Sinks that are disabled by configuration should return (0, nil). 
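For reviewers tracing the new sink architecture: stderrSink and fileSink (in glog_file.go above) are the only Text implementations this diff registers. The following hypothetical in-memory sink shows the contract textPrintf relies on; since internal/logsink is importable only from within the glog module, it is illustrative rather than something downstream code can compile:

```go
package glogtest // illustrative: internal/logsink is not importable outside the glog module

import (
	"sync"

	"github.com/golang/glog/internal/logsink"
)

// memSink is a hypothetical Text sink that captures formatted lines for tests.
type memSink struct {
	mu    sync.Mutex
	lines [][]byte
}

// Enabled implements logsink.Text: accept WARNING and above.
func (s *memSink) Enabled(m *logsink.Meta) bool {
	return m.Severity >= logsink.Warning
}

// Emit implements logsink.Text. The data slice comes from a reused buffer
// (see the bufs pool above), so it must be copied before Emit returns.
func (s *memSink) Emit(m *logsink.Meta, data []byte) (int, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.lines = append(s.lines, append([]byte(nil), data...))
	return len(data), nil
}

func init() {
	// The sink lists may only be mutated during package init (see below).
	logsink.TextSinks = append(logsink.TextSinks, &memSink{})
}
```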
+func Printf(m *Meta, format string, args ...any) (n int, err error) { + m.Depth++ + n, err = textPrintf(m, TextSinks, format, args...) + + for _, sink := range StructuredSinks { + // TODO: Support TextSinks that implement StackWanter? + if sw, ok := sink.(StackWanter); ok && sw.WantStack(m) { + if m.Stack == nil { + // First, try to find a stacktrace in args, otherwise generate one. + for _, arg := range args { + if stack, ok := arg.(stackdump.Stack); ok { + m.Stack = &stack + break + } + } + if m.Stack == nil { + stack := stackdump.Caller( /* skipDepth = */ m.Depth) + m.Stack = &stack + } + } + } + sn, sErr := sink.Printf(m, format, args...) + if sn > n { + n = sn + } + if sErr != nil && err == nil { + err = sErr + } + } + return n, err +} + +// The sets of sinks to which logs should be written. +// +// These must only be modified during package init, and are read-only thereafter. +var ( + // StructuredSinks is the set of Structured sink instances to which logs + // should be written. + StructuredSinks []Structured + + // TextSinks is the set of Text sink instances to which logs should be + // written. + // + // These are registered separately from Structured sink implementations to + // avoid the need to repeat the work of formatting a message for each Text + // sink that writes it. The package-level Printf function writes to both sets + // independently, so a given log destination should only register a Structured + // *or* a Text sink (not both). + TextSinks []Text +) + +type savedEntry struct { + meta *Meta + msg []byte +} + +// StructuredTextWrapper is a Structured sink which forwards logs to a set of Text sinks. +// +// The purpose of this sink is to allow applications to intercept logging calls before they are +// serialized and sent to Text sinks. For example, if one needs to redact PII from logging +// arguments before they reach STDERR, one solution would be to do the redacting in a Structured +// sink that forwards logs to a StructuredTextWrapper instance, and make STDERR a child of that +// StructuredTextWrapper instance. This is how one could set this up in their application: +// +// func init() { +// +// wrapper := logsink.StructuredTextWrapper{TextSinks: logsink.TextSinks} +// // sanitizersink will intercept logs and remove PII +// sanitizer := sanitizersink{Sink: &wrapper} +// logsink.StructuredSinks = append(logsink.StructuredSinks, &sanitizer) +// logsink.TextSinks = nil +// +// } +type StructuredTextWrapper struct { + // TextSinks is the set of Text sinks that should receive logs from this + // StructuredTextWrapper instance. + TextSinks []Text +} + +// Printf forwards logs to all Text sinks registered in the StructuredTextWrapper. +func (w *StructuredTextWrapper) Printf(meta *Meta, format string, args ...any) (n int, err error) { + return textPrintf(meta, w.TextSinks, format, args...) +} diff --git a/prometheus-to-sd/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go b/prometheus-to-sd/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go new file mode 100644 index 000000000..3dc269abc --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go @@ -0,0 +1,35 @@ +package logsink + +import ( + "sync/atomic" + "unsafe" +) + +func fatalMessageStore(e savedEntry) { + // Only put a new one in if we haven't assigned before.
+ atomic.CompareAndSwapPointer(&fatalMessage, nil, unsafe.Pointer(&e)) +} + +var fatalMessage unsafe.Pointer // savedEntry stored with CompareAndSwapPointer + +// FatalMessage returns the Meta and message contents of the first message +// logged with Fatal severity, or false if none has occurred. +func FatalMessage() (*Meta, []byte, bool) { + e := (*savedEntry)(atomic.LoadPointer(&fatalMessage)) + if e == nil { + return nil, nil, false + } + return e.meta, e.msg, true +} + +// DoNotUseRacyFatalMessage is FatalMessage, but worse. +// +//go:norace +//go:nosplit +func DoNotUseRacyFatalMessage() (*Meta, []byte, bool) { + e := (*savedEntry)(fatalMessage) + if e == nil { + return nil, nil, false + } + return e.meta, e.msg, true +} diff --git a/prometheus-to-sd/vendor/github.com/golang/glog/internal/stackdump/stackdump.go b/prometheus-to-sd/vendor/github.com/golang/glog/internal/stackdump/stackdump.go new file mode 100644 index 000000000..3427c9d6b --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/golang/glog/internal/stackdump/stackdump.go @@ -0,0 +1,127 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package stackdump provides wrappers for runtime.Stack and runtime.Callers +// with uniform support for skipping caller frames. +// +// ⚠ Unlike the functions in the runtime package, these may allocate a +// non-trivial quantity of memory: use them with care. ⚠ +package stackdump + +import ( + "bytes" + "runtime" +) + +// runtimeStackSelfFrames is 1 if runtime.Stack includes the call to +// runtime.Stack itself or 0 if it does not. +// +// As of 2016-04-27, the gccgo compiler includes runtime.Stack but the gc +// compiler does not. +var runtimeStackSelfFrames = func() int { + for n := 1 << 10; n < 1<<20; n *= 2 { + buf := make([]byte, n) + n := runtime.Stack(buf, false) + if bytes.Contains(buf[:n], []byte("runtime.Stack")) { + return 1 + } else if n < len(buf) || bytes.Count(buf, []byte("\n")) >= 3 { + return 0 + } + } + return 0 +}() + +// Stack is a stack dump for a single goroutine. +type Stack struct { + // Text is a representation of the stack dump in a human-readable format. + Text []byte + + // PC is a representation of the stack dump using raw program counter values. + PC []uintptr +} + +func (s Stack) String() string { return string(s.Text) } + +// Caller returns the Stack dump for the calling goroutine, starting skipDepth +// frames before the caller of Caller. (Caller(0) provides a dump starting at +// the caller of this function.) +func Caller(skipDepth int) Stack { + return Stack{ + Text: CallerText(skipDepth + 1), + PC: CallerPC(skipDepth + 1), + } +} + +// CallerText returns a textual dump of the stack starting skipDepth frames before +// the caller. (CallerText(0) provides a dump starting at the caller of this +// function.) 
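A short sketch of the stackdump API whose declarations appear above (also an internal package, so usable only from within the glog module; the whereAmI function is hypothetical):

```go
package glogtest // illustrative: internal/stackdump is not importable outside the glog module

import (
	"fmt"

	"github.com/golang/glog/internal/stackdump"
)

func whereAmI() {
	// Caller(0) starts the dump at the function that invokes it (whereAmI);
	// Text and PC carry the same stack in textual and program-counter form.
	s := stackdump.Caller(0)
	fmt.Printf("captured %d PCs:\n%s", len(s.PC), s.Text)
}
```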
+func CallerText(skipDepth int) []byte { + for n := 1 << 10; ; n *= 2 { + buf := make([]byte, n) + n := runtime.Stack(buf, false) + if n < len(buf) { + return pruneFrames(skipDepth+1+runtimeStackSelfFrames, buf[:n]) + } + } +} + +// CallerPC returns a dump of the program counters of the stack starting +// skipDepth frames before the caller. (CallerPC(0) provides a dump starting at +// the caller of this function.) +func CallerPC(skipDepth int) []uintptr { + for n := 1 << 8; ; n *= 2 { + buf := make([]uintptr, n) + n := runtime.Callers(skipDepth+2, buf) + if n < len(buf) { + return buf[:n] + } + } +} + +// pruneFrames removes the topmost skipDepth frames of the first goroutine in a +// textual stack dump. It overwrites the passed-in slice. +// +// If there are fewer than skipDepth frames in the first goroutine's stack, +// pruneFrames prunes it to an empty stack and leaves the remaining contents +// intact. +func pruneFrames(skipDepth int, stack []byte) []byte { + headerLen := 0 + for i, c := range stack { + if c == '\n' { + headerLen = i + 1 + break + } + } + if headerLen == 0 { + return stack // No header line - not a well-formed stack trace. + } + + skipLen := headerLen + skipNewlines := skipDepth * 2 + for ; skipLen < len(stack) && skipNewlines > 0; skipLen++ { + c := stack[skipLen] + if c != '\n' { + continue + } + skipNewlines-- + skipLen++ + if skipNewlines == 0 || skipLen == len(stack) || stack[skipLen] == '\n' { + break + } + } + + pruned := stack[skipLen-headerLen:] + copy(pruned, stack[:headerLen]) + return pruned +} diff --git a/prometheus-to-sd/vendor/github.com/golang/protobuf/jsonpb/decode.go b/prometheus-to-sd/vendor/github.com/golang/protobuf/jsonpb/decode.go new file mode 100644 index 000000000..6c16c255f --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -0,0 +1,530 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONUnmarshalV2 = false + +// UnmarshalNext unmarshals the next JSON object from d into m. +func UnmarshalNext(d *json.Decoder, m proto.Message) error { + return new(Unmarshaler).UnmarshalNext(d, m) +} + +// Unmarshal unmarshals a JSON object from r into m. +func Unmarshal(r io.Reader, m proto.Message) error { + return new(Unmarshaler).Unmarshal(r, m) +} + +// UnmarshalString unmarshals a JSON object from s into m. +func UnmarshalString(s string, m proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // AllowUnknownFields specifies whether to allow messages to contain + // unknown JSON fields, as opposed to failing to unmarshal. + AllowUnknownFields bool + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize the way +// they are unmarshaled from JSON. 
Messages that implement this should also +// implement JSONPBMarshaler so that the custom format can be produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Unmarshal unmarshals a JSON object from r into m. +func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { + return u.UnmarshalNext(json.NewDecoder(r), m) +} + +// UnmarshalNext unmarshals the next JSON object from d into m. +func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { + if m == nil { + return errors.New("invalid nil message") + } + + // Parse the next JSON object from the stream. + raw := json.RawMessage{} + if err := d.Decode(&raw); err != nil { + return err + } + + // Check for custom unmarshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsu, ok := m.(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, raw) + } + + mr := proto.MessageReflect(m) + + // NOTE: For historical reasons, a top-level null is treated as a noop. + // This is incorrect, but kept for compatibility. + if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { + return nil + } + + if wrapJSONUnmarshalV2 { + // NOTE: If input message is non-empty, we need to preserve merge semantics + // of the old jsonpb implementation. These semantics are not supported by + // the protobuf JSON specification. + isEmpty := true + mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { + isEmpty = false // at least one iteration implies non-empty + return false + }) + if !isEmpty { + // Perform unmarshaling into a newly allocated, empty message. + mr = mr.New() + + // Use a defer to copy all unmarshaled fields into the original message. + dst := proto.MessageReflect(m) + defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + dst.Set(fd, v) + return true + }) + } + + // Unmarshal using the v2 JSON unmarshaler. 
+ opts := protojson.UnmarshalOptions{ + DiscardUnknown: u.AllowUnknownFields, + } + if u.AnyResolver != nil { + opts.Resolver = anyResolver{u.AnyResolver} + } + return opts.Unmarshal(raw, mr.Interface()) + } else { + if err := u.unmarshalMessage(mr, raw); err != nil { + return err + } + return protoV2.CheckInitialized(mr.Interface()) + } +} + +func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { + md := m.Descriptor() + fds := md.Fields() + + if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, in) + } + + if string(in) == "null" && md.FullName() != "google.protobuf.Value" { + return nil + } + + switch wellKnownType(md.FullName()) { + case "Any": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + rawTypeURL, ok := jsonObject["@type"] + if !ok { + return errors.New("Any JSON doesn't have '@type'") + } + typeURL, err := unquoteString(string(rawTypeURL)) + if err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) + } + m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) + + var m2 protoreflect.Message + if u.AnyResolver != nil { + mi, err := u.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + if err == protoregistry.NotFound { + return fmt.Errorf("could not resolve Any message type: %v", typeURL) + } + return err + } + m2 = mt.New() + } + + if wellKnownType(m2.Descriptor().FullName()) != "" { + rawValue, ok := jsonObject["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + if err := u.unmarshalMessage(m2, rawValue); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } else { + delete(jsonObject, "@type") + rawJSON, err := json.Marshal(jsonObject) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + if err = u.unmarshalMessage(m2, rawJSON); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } + + rawWire, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) + return nil + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + v, err := u.unmarshalValue(m.NewField(fd), in, fd) + if err != nil { + return err + } + m.Set(fd, v) + return nil + case "Duration": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + d, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + sec := d.Nanoseconds() / 1e9 + nsec := d.Nanoseconds() % 1e9 + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Timestamp": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + sec := t.Unix() + nsec := t.Nanosecond() + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Value": + switch { + 
case string(in) == "null": + m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) + case string(in) == "true": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) + case string(in) == "false": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) + case hasPrefixAndSuffix('"', in, '"'): + s, err := unquoteString(string(in)) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) + case hasPrefixAndSuffix('[', in, ']'): + v := m.Mutable(fds.ByNumber(6)) + return u.unmarshalMessage(v.Message(), in) + case hasPrefixAndSuffix('{', in, '}'): + v := m.Mutable(fds.ByNumber(5)) + return u.unmarshalMessage(v.Message(), in) + default: + f, err := strconv.ParseFloat(string(in), 0) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) + } + return nil + case "ListValue": + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + lv := m.Mutable(fds.ByNumber(1)).List() + for _, raw := range jsonArray { + ve := lv.NewElement() + if err := u.unmarshalMessage(ve.Message(), raw); err != nil { + return err + } + lv.Append(ve) + } + return nil + case "Struct": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + mv := m.Mutable(fds.ByNumber(1)).Map() + for key, raw := range jsonObject { + kv := protoreflect.ValueOf(key).MapKey() + vv := mv.NewValue() + if err := u.unmarshalMessage(vv.Message(), raw); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) + } + mv.Set(kv, vv) + } + return nil + } + + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + // Handle known fields. + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if fd.IsWeak() && fd.Message().IsPlaceholder() { + continue // weak reference is not linked in + } + + // Search for any raw JSON value associated with this field. + var raw json.RawMessage + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + name = string(fd.JSONName()) + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + // Handle extension fields. + for name, raw := range jsonObject { + if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { + continue + } + + // Resolve the extension field by name. 
+ xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(md) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + continue + } + delete(jsonObject, name) + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + if !u.AllowUnknownFields && len(jsonObject) > 0 { + for name := range jsonObject { + return fmt.Errorf("unknown field %q in %v", name, md.FullName()) + } + } + return nil +} + +func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { + if fd.Cardinality() == protoreflect.Repeated { + return false + } + if md := fd.Message(); md != nil { + return md.FullName() == "google.protobuf.Value" + } + if ed := fd.Enum(); ed != nil { + return ed.FullName() == "google.protobuf.NullValue" + } + return false +} + +func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { + if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { + _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) + return ok + } + return false +} + +func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch { + case fd.IsList(): + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return v, err + } + lv := v.List() + for _, raw := range jsonArray { + ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) + if err != nil { + return v, err + } + lv.Append(ve) + } + return v, nil + case fd.IsMap(): + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return v, err + } + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + for key, raw := range jsonObject { + var kv protoreflect.MapKey + if kfd.Kind() == protoreflect.StringKind { + kv = protoreflect.ValueOf(key).MapKey() + } else { + v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) + if err != nil { + return v, err + } + kv = v.MapKey() + } + + vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) + if err != nil { + return v, err + } + mv.Set(kv, vv) + } + return v, nil + default: + return u.unmarshalSingularValue(v, in, fd) + } +} + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(+1), + `"-Infinity"`: math.Inf(-1), +} + +func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + return unmarshalValue(in, new(bool)) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return unmarshalValue(trimQuote(in), new(int32)) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return unmarshalValue(trimQuote(in), new(int64)) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return unmarshalValue(trimQuote(in), new(uint32)) + case protoreflect.Uint64Kind, 
protoreflect.Fixed64Kind: + return unmarshalValue(trimQuote(in), new(uint64)) + case protoreflect.FloatKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat32(float32(f)), nil + } + return unmarshalValue(trimQuote(in), new(float32)) + case protoreflect.DoubleKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat64(float64(f)), nil + } + return unmarshalValue(trimQuote(in), new(float64)) + case protoreflect.StringKind: + return unmarshalValue(in, new(string)) + case protoreflect.BytesKind: + return unmarshalValue(in, new([]byte)) + case protoreflect.EnumKind: + if hasPrefixAndSuffix('"', in, '"') { + vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) + if vd == nil { + return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) + } + return protoreflect.ValueOfEnum(vd.Number()), nil + } + return unmarshalValue(in, new(protoreflect.EnumNumber)) + case protoreflect.MessageKind, protoreflect.GroupKind: + err := u.unmarshalMessage(v.Message(), in) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } +} + +func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { + err := json.Unmarshal(in, v) + return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err +} + +func unquoteString(in string) (out string, err error) { + err = json.Unmarshal([]byte(in), &out) + return out, err +} + +func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { + if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { + return true + } + return false +} + +// trimQuote is like unquoteString but simply strips surrounding quotes. +// This is incorrect, but is behavior done by the legacy implementation. +func trimQuote(in []byte) []byte { + if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { + in = in[1 : len(in)-1] + } + return in +} diff --git a/prometheus-to-sd/vendor/github.com/golang/protobuf/jsonpb/encode.go b/prometheus-to-sd/vendor/github.com/golang/protobuf/jsonpb/encode.go new file mode 100644 index 000000000..685c80a62 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -0,0 +1,559 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONMarshalV2 = false + +// Marshaler is a configurable object for marshaling protocol buffer messages +// to the specified JSON representation. +type Marshaler struct { + // OrigName specifies whether to use the original protobuf name for fields. + OrigName bool + + // EnumsAsInts specifies whether to render enum values as integers, + // as opposed to string values. + EnumsAsInts bool + + // EmitDefaults specifies whether to render fields with zero values. + EmitDefaults bool + + // Indent controls whether the output is compact or not. + // If empty, the output is compact JSON. Otherwise, every JSON object + // entry and JSON array value will be on its own line. 
+ // Each line will be preceded by repeated copies of Indent, where the + // number of copies is the current indentation depth. + Indent string + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should also +// implement JSONPBUnmarshaler so that the custom format can be parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// Marshal serializes a protobuf message as JSON into w. +func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { + b, err := jm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// MarshalToString serializes a protobuf message as JSON in string form. +func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { + b, err := jm.marshal(m) + if err != nil { + return "", err + } + return string(b), nil +} + +func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { + v := reflect.ValueOf(m) + if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, errors.New("Marshal called with nil") + } + + // Check for custom marshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsm, ok := m.(JSONPBMarshaler); ok { + return jsm.MarshalJSONPB(jm) + } + + if wrapJSONMarshalV2 { + opts := protojson.MarshalOptions{ + UseProtoNames: jm.OrigName, + UseEnumNumbers: jm.EnumsAsInts, + EmitUnpopulated: jm.EmitDefaults, + Indent: jm.Indent, + } + if jm.AnyResolver != nil { + opts.Resolver = anyResolver{jm.AnyResolver} + } + return opts.Marshal(proto.MessageReflect(m).Interface()) + } else { + // Check for unpopulated required fields first. + m2 := proto.MessageReflect(m) + if err := protoV2.CheckInitialized(m2.Interface()); err != nil { + return nil, err + } + + w := jsonWriter{Marshaler: jm} + err := w.marshalMessage(m2, "", "") + return w.buf, err + } +} + +type jsonWriter struct { + *Marshaler + buf []byte +} + +func (w *jsonWriter) write(s string) { + w.buf = append(w.buf, s...) +} + +func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { + if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(w.Marshaler) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + w.write(string(b)) + return nil + } + + md := m.Descriptor() + fds := md.Fields() + + // Handle well-known types. 
+ const secondInNanos = int64(time.Second / time.Nanosecond) + switch wellKnownType(md.FullName()) { + case "Any": + return w.marshalAny(m, indent) + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + case "Duration": + const maxSecondsInDuration = 315576000000 + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + var sign string + if s < 0 || ns < 0 { + sign, s, ns = "-", -1*s, -1*ns + } + x := fmt.Sprintf("%s%d.%09d", sign, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vs"`, x)) + return nil + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vZ"`, x)) + return nil + case "Value": + // JSON value; which is a null, number, string, bool, object, or array. + od := md.Oneofs().Get(0) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("nil Value") + } + return w.marshalValue(fd, m.Get(fd), indent) + case "Struct", "ListValue": + // JSON object or array. + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + + firstField := true + if typeURL != "" { + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + if fd == nil { + continue + } + } else { + i++ + } + + v := m.Get(fd) + + if !m.Has(fd) { + if !w.EmitDefaults || fd.ContainingOneof() != nil { + continue + } + if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { + v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars + } + } + + if !firstField { + w.writeComma() + } + if err := w.marshalField(fd, v, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if md.ExtensionRanges().Len() > 0 { + // Collect a sorted list of all extension descriptor and values. 
+ type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + if !firstField { + w.writeComma() + } + if err := w.marshalField(ext.desc, ext.val, indent); err != nil { + return err + } + firstField = false + } + } + + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) writeComma() { + if w.Indent != "" { + w.write(",\n") + } else { + w.write(",") + } +} + +func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + md := m.Descriptor() + typeURL := m.Get(md.Fields().ByNumber(1)).String() + rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() + + var m2 protoreflect.Message + if w.AnyResolver != nil { + mi, err := w.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + return err + } + m2 = mt.New() + } + + if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { + return err + } + + if wellKnownType(m2.Descriptor().FullName()) == "" { + return w.marshalMessage(m2, indent, typeURL) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + w.writeComma() + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + w.write(`"value": `) + } else { + w.write(`"value":`) + } + if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { + return err + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"@type":`) + if w.Indent != "" { + w.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + w.write(string(b)) + return nil +} + +// marshalField writes field description and value to the Writer. +func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"`) + switch { + case fd.IsExtension(): + // For message set, use the fname of the message as the extension name. 
+ name := string(fd.FullName()) + if isMessageSet(fd.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + w.write("[" + name + "]") + case w.OrigName: + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + w.write(name) + default: + w.write(string(fd.JSONName())) + } + w.write(`":`) + if w.Indent != "" { + w.write(" ") + } + return w.marshalValue(fd, v, indent) +} + +func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case fd.IsList(): + w.write("[") + comma := "" + lv := v.List() + for i := 0; i < lv.Len(); i++ { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write("]") + return nil + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + + // Collect a sorted list of all map keys and values. + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + + w.write(`{`) + comma := "" + for _, entry := range entries { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + + s := fmt.Sprint(entry.key.Interface()) + b, err := json.Marshal(s) + if err != nil { + return err + } + w.write(string(b)) + + w.write(`:`) + if w.Indent != "" { + w.write(` `) + } + + if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write(`}`) + return nil + default: + return w.marshalSingularValue(fd, v, indent) + } +} + +func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case !v.IsValid(): + w.write("null") + return nil + case fd.Message() != nil: + return w.marshalMessage(v.Message(), indent+w.Indent, "") + case fd.Enum() != nil: + if fd.Enum().FullName() == "google.protobuf.NullValue" { + w.write("null") + return nil + } + + vd := fd.Enum().Values().ByNumber(v.Enum()) + if vd == nil || w.EnumsAsInts { + w.write(strconv.Itoa(int(v.Enum()))) + } else { + w.write(`"` + string(vd.Name()) + `"`) + } + return nil + default: + switch v.Interface().(type) { + case float32, float64: + switch { + case math.IsInf(v.Float(), +1): + w.write(`"Infinity"`) + return nil + case math.IsInf(v.Float(), -1): + w.write(`"-Infinity"`) + return nil + case math.IsNaN(v.Float()): + w.write(`"NaN"`) + 
return nil + } + case int64, uint64: + w.write(fmt.Sprintf(`"%d"`, v.Interface())) + return nil + } + + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + w.write(string(b)) + return nil + } +} diff --git a/prometheus-to-sd/vendor/github.com/golang/protobuf/jsonpb/json.go b/prometheus-to-sd/vendor/github.com/golang/protobuf/jsonpb/json.go new file mode 100644 index 000000000..480e2448d --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/golang/protobuf/jsonpb/json.go @@ -0,0 +1,69 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonpb provides functionality to marshal and unmarshal between a +// protocol buffer message and JSON. It follows the specification at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// Do not rely on the default behavior of the standard encoding/json package +// when called on generated message types as it does not operate correctly. +// +// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" +// package instead. +package jsonpb + +import ( + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// AnyResolver takes a type URL, present in an Any message, +// and resolves it into an instance of the associated message. +type AnyResolver interface { + Resolve(typeURL string) (proto.Message, error) +} + +type anyResolver struct{ AnyResolver } + +func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + return r.FindMessageByURL(string(message)) +} + +func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + m, err := r.Resolve(url) + if err != nil { + return nil, err + } + return protoimpl.X.MessageTypeOf(m), nil +} + +func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +func wellKnownType(s protoreflect.FullName) string { + if s.Parent() == "google.protobuf" { + switch s.Name() { + case "Empty", "Any", + "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue", + "Duration", "Timestamp", + "NullValue", "Struct", "Value", "ListValue": + return string(s.Name()) + } + } + return "" +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/.travis.yml b/prometheus-to-sd/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 000000000..d8156a60b --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... 
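A minimal round-trip sketch of the jsonpb shim vendored above, exercising the Duration well-known-type path that encode.go and decode.go implement. This is an illustration, not part of the patch; it assumes the v1 `ptypes/duration` alias package is also available to the module:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Durations marshal to a quoted decimal-seconds string with 0, 3, 6,
	// or 9 fractional digits, as the well-known-type branch above encodes.
	d := &duration.Duration{Seconds: 3, Nanos: 500000000}

	m := &jsonpb.Marshaler{}
	s, err := m.MarshalToString(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // "3.500s" (a JSON string, quotes included)

	// Round-trip through the Unmarshaler from decode.go.
	var d2 duration.Duration
	if err := jsonpb.UnmarshalString(s, &d2); err != nil {
		panic(err)
	}
	fmt.Println(d2.Seconds, d2.Nanos) // 3 500000000
}
```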
diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/CONTRIBUTING.md b/prometheus-to-sd/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 000000000..04fdf09f1 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/CONTRIBUTORS b/prometheus-to-sd/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 000000000..b4bb97f6b --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/LICENSE b/prometheus-to-sd/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 000000000..5dc68268d --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/README.md b/prometheus-to-sd/vendor/github.com/google/uuid/README.md new file mode 100644 index 000000000..f765a46f9 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). 
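A short usage sketch of the vendored uuid package, illustrating the value semantics the README describes: because a UUID is a 16-byte array rather than a slice, values are comparable with `==` and usable as map keys.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id := uuid.New() // random (Version 4) UUID; panics only if crypto/rand fails

	// Array value semantics: comparable and usable as a map key directly.
	seen := map[uuid.UUID]bool{id: true}

	parsed, err := uuid.Parse(id.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(seen[parsed]) // true: round-trips to the same value
}
```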
+ +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://pkg.go.dev/github.com/google/uuid diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/dce.go b/prometheus-to-sd/vendor/github.com/google/uuid/dce.go new file mode 100644 index 000000000..fa820b9d3 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/doc.go b/prometheus-to-sd/vendor/github.com/google/uuid/doc.go new file mode 100644 index 000000000..5b8a4b9af --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. 
+package uuid diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/hash.go b/prometheus-to-sd/vendor/github.com/google/uuid/hash.go new file mode 100644 index 000000000..b404f4bec --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/marshal.go b/prometheus-to-sd/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 000000000..14bd34072 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + return err + } + *uuid = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (uuid *UUID) UnmarshalBinary(data []byte) error {
+	if len(data) != 16 {
+		return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+	}
+	copy(uuid[:], data)
+	return nil
+}
diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/node.go b/prometheus-to-sd/vendor/github.com/google/uuid/node.go
new file mode 100644
index 000000000..d651a2b06
--- /dev/null
+++ b/prometheus-to-sd/vendor/github.com/google/uuid/node.go
@@ -0,0 +1,90 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"sync"
+)
+
+var (
+	nodeMu sync.Mutex
+	ifname string  // name of interface being used
+	nodeID [6]byte // hardware for version 1 UUIDs
+	zeroID [6]byte // nodeID with only 0's
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived. The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated. If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	return setNodeInterface(name)
+}
+
+func setNodeInterface(name string) bool {
+	iname, addr := getHardwareInterface(name) // null implementation for js
+	if iname != "" && addr != nil {
+		ifname = iname
+		copy(nodeID[:], addr)
+		return true
+	}
+
+	// We found no interfaces with a valid hardware address. If name
+	// does not specify a specific interface generate a random Node ID
+	// (section 4.1.6)
+	if name == "" {
+		ifname = "random"
+		randomBits(nodeID[:])
+		return true
+	}
+	return false
+}
+
+// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	nid := nodeID
+	return nid[:]
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
+// of id are used. If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+	if len(id) < 6 {
+		return false
+	}
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	copy(nodeID[:], id)
+	ifname = "user"
+	return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
+// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+	var node [6]byte
+	copy(node[:], uuid[10:])
+	return node[:]
+}
diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/node_js.go b/prometheus-to-sd/vendor/github.com/google/uuid/node_js.go
new file mode 100644
index 000000000..24b78edc9
--- /dev/null
+++ b/prometheus-to-sd/vendor/github.com/google/uuid/node_js.go
@@ -0,0 +1,12 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build js
+
+package uuid
+
+// getHardwareInterface returns nil values for the JS version of the code.
+// This removes the "net" dependency, because it is not used in the browser.
+// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
+func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/node_net.go b/prometheus-to-sd/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 000000000..0cbbcddbd --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/null.go b/prometheus-to-sd/vendor/github.com/google/uuid/null.go new file mode 100644 index 000000000..d7fcbf286 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. +// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. +func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. 
+func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/sql.go b/prometheus-to-sd/vendor/github.com/google/uuid/sql.go new file mode 100644 index 000000000..2e02ec06c --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/time.go b/prometheus-to-sd/vendor/github.com/google/uuid/time.go new file mode 100644 index 000000000..e6ef06cdc --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. 
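A small sketch of recovering the embedded timestamp from a Version 1 UUID via the `Time` and `UnixTime` accessors defined just below. Version 1 UUIDs carry a 60-bit count of 100 ns intervals since 15 Oct 1582, which `UnixTime` rebases onto the Unix epoch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewUUID() // Version 1: embeds a timestamp and node ID
	if err != nil {
		panic(err)
	}
	// Time() extracts the 100 ns count; UnixTime() converts it to
	// seconds and nanoseconds since 1 Jan 1970.
	sec, nsec := id.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec).UTC()) // roughly the current time
}
```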
+func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/util.go b/prometheus-to-sd/vendor/github.com/google/uuid/util.go new file mode 100644 index 000000000..5ea6c7378 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. 
+var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/uuid.go b/prometheus-to-sd/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 000000000..a57207aeb --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,294 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + "sync" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) + +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} + +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. 
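A sketch exercising the four encodings that the `Parse` doc comment above enumerates; the sample value is the NameSpaceDNS UUID from hash.go, so nothing here is invented:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Standard, URN, Microsoft-braced, and raw-hex forms all decode
	// to the same 16-byte value.
	forms := []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
		"6ba7b8109dad11d180b400c04fd430c8",
	}
	for _, s := range forms {
		u, err := uuid.Parse(s)
		fmt.Println(u, err) // same UUID, <nil> error, for every form
	}
}
```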
+func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(s)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(b)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. 
+func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/version1.go b/prometheus-to-sd/vendor/github.com/google/uuid/version1.go new file mode 100644 index 000000000..463109629 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. 
If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil and an error.
+//
+// In most cases, New should be used.
+func NewUUID() (UUID, error) {
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	timeLow := uint32(now & 0xffffffff)
+	timeMid := uint16((now >> 32) & 0xffff)
+	timeHi := uint16((now >> 48) & 0x0fff)
+	timeHi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], timeLow)
+	binary.BigEndian.PutUint16(uuid[4:], timeMid)
+	binary.BigEndian.PutUint16(uuid[6:], timeHi)
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	copy(uuid[10:], nodeID[:])
+	nodeMu.Unlock()
+
+	return uuid, nil
+}
diff --git a/prometheus-to-sd/vendor/github.com/google/uuid/version4.go b/prometheus-to-sd/vendor/github.com/google/uuid/version4.go
new file mode 100644
index 000000000..7697802e4
--- /dev/null
+++ b/prometheus-to-sd/vendor/github.com/google/uuid/version4.go
@@ -0,0 +1,76 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "io"
+
+// New creates a new random UUID or panics. New is equivalent to
+// the expression
+//
+//	uuid.Must(uuid.NewRandom())
+func New() UUID {
+	return Must(NewRandom())
+}
+
+// NewString creates a new random UUID and returns it as a string or panics.
+// NewString is equivalent to the expression
+//
+//	uuid.New().String()
+func NewString() string {
+	return Must(NewRandom()).String()
+}
+
+// NewRandom returns a Random (Version 4) UUID.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// Uses the randomness pool if it was enabled with EnableRandPool.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+//	Randomly generated UUIDs have 122 random bits. One's annual risk of being
+//	hit by a meteorite is estimated to be one chance in 17 billion, that
+//	means the probability is about 0.00000000006 (6 × 10−11),
+//	equivalent to the odds of creating a few tens of trillions of UUIDs in a
+//	year and having one duplicate.
+func NewRandom() (UUID, error) {
+	if !poolEnabled {
+		return NewRandomFromReader(rander)
+	}
+	return newRandomFromPool()
+}
+
+// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
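A sketch of `NewRandomFromReader` (defined just below) with a seeded math/rand source, which yields reproducible Version 4 UUIDs, a useful property in tests; `*rand.Rand` satisfies io.Reader:

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/google/uuid"
)

func main() {
	// A fixed seed makes the generated UUID deterministic across runs.
	r := rand.New(rand.NewSource(1))
	u, err := uuid.NewRandomFromReader(r)
	if err != nil {
		panic(err)
	}
	// Version and variant bits are forced regardless of the reader.
	fmt.Println(u.Version(), u.Variant()) // VERSION_4 RFC4122
}
```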
+func NewRandomFromReader(r io.Reader) (UUID, error) { + var uuid UUID + _, err := io.ReadFull(r, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/prometheus-to-sd/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE b/prometheus-to-sd/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/prometheus-to-sd/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/prometheus-to-sd/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go new file mode 100644 index 000000000..b3283b815 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go @@ -0,0 +1,185 @@ +// Copyright 2022 Google LLC. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner"). +// +// The signer binary is OS-specific, but exposes a standard set of APIs for the client to use. +package client + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/gob" + "errors" + "fmt" + "io" + "net/rpc" + "os" + "os/exec" + + "github.com/googleapis/enterprise-certificate-proxy/client/util" +) + +const signAPI = "EnterpriseCertSigner.Sign" +const certificateChainAPI = "EnterpriseCertSigner.CertificateChain" +const publicKeyAPI = "EnterpriseCertSigner.Public" + +// A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser. +type Connection struct { + io.ReadCloser + io.WriteCloser +} + +// Close closes c's underlying ReadCloser and WriteCloser. +func (c *Connection) Close() error { + rerr := c.ReadCloser.Close() + werr := c.WriteCloser.Close() + if rerr != nil { + return rerr + } + return werr +} + +func init() { + gob.Register(crypto.SHA256) + gob.Register(&rsa.PSSOptions{}) +} + +// SignArgs contains arguments to a crypto Signer.Sign method. +type SignArgs struct { + Digest []byte // The content to sign. + Opts crypto.SignerOpts // Options for signing, such as Hash identifier. +} + +// Key implements credential.Credential by holding the executed signer subprocess. +type Key struct { + cmd *exec.Cmd // Pointer to the signer subprocess. + client *rpc.Client // Pointer to the rpc client that communicates with the signer subprocess. + publicKey crypto.PublicKey // Public key of loaded certificate. + chain [][]byte // Certificate chain of loaded certificate. +} + +// CertificateChain returns the credential as a raw X509 cert chain. This contains the public key. +func (k *Key) CertificateChain() [][]byte { + return k.chain +} + +// Close closes the RPC connection and kills the signer subprocess. +// Call this to free up resources when the Key object is no longer needed. +func (k *Key) Close() error { + if err := k.cmd.Process.Kill(); err != nil { + return fmt.Errorf("failed to kill signer process: %w", err) + } + // Wait for cmd to exit and release resources. Since the process is forcefully killed, this + // will return a non-nil error (varies by OS), which we will ignore. + _ = k.cmd.Wait() + // The Pipes connecting the RPC client should have been closed when the signer subprocess was killed. + // Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault. + if err := k.client.Close(); err.Error() != "close |0: file already closed" { + return fmt.Errorf("failed to close RPC connection: %w", err) + } + return nil +} + +// Public returns the public key for this Key. +func (k *Key) Public() crypto.PublicKey { + return k.publicKey +} + +// Sign signs a message digest, using the specified signer options. +func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) { + if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() { + return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size()) + } + err = k.client.Call(signAPI, SignArgs{Digest: digest, Opts: opts}, &signed) + return +} + +// ErrCredUnavailable is a sentinel error that indicates ECP Cred is unavailable, +// possibly due to missing config or missing binary path. 
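+//
+// Illustrative usage (a sketch, not part of the upstream source): because
+// Cred returns this sentinel directly, callers can distinguish "ECP not
+// configured" from hard failures with errors.Is:
+//
+//	key, err := client.Cred("")
+//	if errors.Is(err, client.ErrCredUnavailable) {
+//		// ECP is not configured on this machine; fall back to other credentials.
+//	} else if err != nil {
+//		// The signer binary exists but could not be started or queried.
+//	}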
+var ErrCredUnavailable = errors.New("Cred is unavailable") + +// Cred spawns a signer subprocess that listens on stdin/stdout to perform certificate +// related operations, including signing messages with the private key. +// +// The signer binary path is read from the specified configFilePath, if provided. +// Otherwise, use the default config file path. +// +// The config file also specifies which certificate the signer should use. +func Cred(configFilePath string) (*Key, error) { + if configFilePath == "" { + configFilePath = util.GetDefaultConfigFilePath() + } + enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath) + if err != nil { + if errors.Is(err, util.ErrConfigUnavailable) { + return nil, ErrCredUnavailable + } + return nil, err + } + k := &Key{ + cmd: exec.Command(enterpriseCertSignerPath, configFilePath), + } + + // Redirect errors from subprocess to parent process. + k.cmd.Stderr = os.Stderr + + // RPC client will communicate with subprocess over stdin/stdout. + kin, err := k.cmd.StdinPipe() + if err != nil { + return nil, err + } + kout, err := k.cmd.StdoutPipe() + if err != nil { + return nil, err + } + k.client = rpc.NewClient(&Connection{kout, kin}) + + if err := k.cmd.Start(); err != nil { + return nil, fmt.Errorf("starting enterprise cert signer subprocess: %w", err) + } + + if err := k.client.Call(certificateChainAPI, struct{}{}, &k.chain); err != nil { + return nil, fmt.Errorf("failed to retrieve certificate chain: %w", err) + } + + var publicKeyBytes []byte + if err := k.client.Call(publicKeyAPI, struct{}{}, &publicKeyBytes); err != nil { + return nil, fmt.Errorf("failed to retrieve public key: %w", err) + } + + publicKey, err := x509.ParsePKIXPublicKey(publicKeyBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse public key: %w", err) + } + + var ok bool + k.publicKey, ok = publicKey.(crypto.PublicKey) + if !ok { + return nil, fmt.Errorf("invalid public key type: %T", publicKey) + } + + switch pub := k.publicKey.(type) { + case *rsa.PublicKey: + if pub.Size() < 256 { + return nil, fmt.Errorf("RSA modulus size is less than 2048 bits: %v", pub.Size()*8) + } + case *ecdsa.PublicKey: + default: + return nil, fmt.Errorf("unsupported public key type: %v", pub) + } + + return k, nil +} diff --git a/prometheus-to-sd/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/prometheus-to-sd/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go new file mode 100644 index 000000000..1640ec1c9 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go @@ -0,0 +1,91 @@ +// Copyright 2022 Google LLC. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package util provides helper functions for the client. 
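+// It reads the ECP signer binary location from the JSON certificate config
+// and resolves the platform-specific default location of that config file.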
+package util + +import ( + "encoding/json" + "errors" + "io" + "os" + "os/user" + "path/filepath" + "runtime" +) + +const configFileName = "certificate_config.json" + +// EnterpriseCertificateConfig contains parameters for initializing signer. +type EnterpriseCertificateConfig struct { + Libs Libs `json:"libs"` +} + +// Libs specifies the locations of helper libraries. +type Libs struct { + ECP string `json:"ecp"` +} + +// ErrConfigUnavailable is a sentinel error that indicates ECP config is unavailable, +// possibly due to entire config missing or missing binary path. +var ErrConfigUnavailable = errors.New("Config is unavailable") + +// LoadSignerBinaryPath retrieves the path of the signer binary from the config file. +func LoadSignerBinaryPath(configFilePath string) (path string, err error) { + jsonFile, err := os.Open(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return "", ErrConfigUnavailable + } + return "", err + } + + byteValue, err := io.ReadAll(jsonFile) + if err != nil { + return "", err + } + var config EnterpriseCertificateConfig + err = json.Unmarshal(byteValue, &config) + if err != nil { + return "", err + } + signerBinaryPath := config.Libs.ECP + if signerBinaryPath == "" { + return "", ErrConfigUnavailable + } + return signerBinaryPath, nil +} + +func guessHomeDir() string { + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + if v := os.Getenv("HOME"); v != "" { + return v + } + // Else, fall back to user.Current: + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} + +func getDefaultConfigFileDirectory() (directory string) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud") + } + return filepath.Join(guessHomeDir(), ".config/gcloud") +} + +// GetDefaultConfigFilePath returns the default path of the enterprise certificate config file created by gCloud. 
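+// On Linux and macOS this resolves to ~/.config/gcloud/certificate_config.json;
+// on Windows, to %APPDATA%\gcloud\certificate_config.json.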
+func GetDefaultConfigFilePath() (path string) { + return filepath.Join(getDefaultConfigFileDirectory(), configFileName) +} diff --git a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json new file mode 100644 index 000000000..10295639c --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + "v2": "2.7.1" +} diff --git a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/CHANGES.md new file mode 100644 index 000000000..41a7ca94d --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -0,0 +1,54 @@ +# Changelog + +## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06) + + +### Bug Fixes + +* **v2/apierror:** return Unknown GRPCStatus when err source is HTTP ([#260](https://github.com/googleapis/gax-go/issues/260)) ([043b734](https://github.com/googleapis/gax-go/commit/043b73437a240a91229207fb3ee52a9935a36f23)), refs [#254](https://github.com/googleapis/gax-go/issues/254) + +## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02) + + +### Features + +* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6)) +* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f)) + +## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13) + + +### Features + +* **v2:** copy DetermineContentType functionality ([#230](https://github.com/googleapis/gax-go/issues/230)) ([2c52a70](https://github.com/googleapis/gax-go/commit/2c52a70bae965397f740ed27d46aabe89ff249b3)) + +## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04) + + +### Bug Fixes + +* **v2:** resolve bad genproto pseudoversion in go.mod ([#218](https://github.com/googleapis/gax-go/issues/218)) ([1379b27](https://github.com/googleapis/gax-go/commit/1379b27e9846d959f7e1163b9ef298b3c92c8d23)) + +## [2.5.0](https://github.com/googleapis/gax-go/compare/v2.4.0...v2.5.0) (2022-08-04) + + +### Features + +* add ExtractProtoMessage to apierror ([#213](https://github.com/googleapis/gax-go/issues/213)) ([a6ce70c](https://github.com/googleapis/gax-go/commit/a6ce70c725c890533a9de6272d3b5ba2e336d6bb)) + +## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09) + + +### Features + +* **v2:** add OnHTTPCodes CallOption ([#188](https://github.com/googleapis/gax-go/issues/188)) ([ba7c534](https://github.com/googleapis/gax-go/commit/ba7c5348363ab6c33e1cee3c03c0be68a46ca07c)) + + +### Bug Fixes + +* **v2/apierror:** use errors.As in FromError ([#189](https://github.com/googleapis/gax-go/issues/189)) ([f30f05b](https://github.com/googleapis/gax-go/commit/f30f05be583828f4c09cca4091333ea88ff8d79e)) + + +### Miscellaneous Chores + +* **v2:** bump release-please processing ([#192](https://github.com/googleapis/gax-go/issues/192)) ([56172f9](https://github.com/googleapis/gax-go/commit/56172f971d1141d7687edaac053ad3470af76719)) diff --git a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go 
b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go new file mode 100644 index 000000000..ed862c8b3 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -0,0 +1,347 @@ +// Copyright 2021, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package apierror implements a wrapper error for parsing error details from +// API calls. Both HTTP & gRPC status errors are supported. +package apierror + +import ( + "errors" + "fmt" + "strings" + + jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto" + "google.golang.org/api/googleapi" + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +// ErrDetails holds the google/rpc/error_details.proto messages. +type ErrDetails struct { + ErrorInfo *errdetails.ErrorInfo + BadRequest *errdetails.BadRequest + PreconditionFailure *errdetails.PreconditionFailure + QuotaFailure *errdetails.QuotaFailure + RetryInfo *errdetails.RetryInfo + ResourceInfo *errdetails.ResourceInfo + RequestInfo *errdetails.RequestInfo + DebugInfo *errdetails.DebugInfo + Help *errdetails.Help + LocalizedMessage *errdetails.LocalizedMessage + + // Unknown stores unidentifiable error details. + Unknown []interface{} +} + +// ErrMessageNotFound is used to signal ExtractProtoMessage found no matching messages. +var ErrMessageNotFound = errors.New("message not found") + +// ExtractProtoMessage provides a mechanism for extracting protobuf messages from the +// Unknown error details. If ExtractProtoMessage finds an unknown message of the same type, +// the content of the message is copied to the provided message. +// +// ExtractProtoMessage will return ErrMessageNotFound if there are no message matching the +// protocol buffer type of the provided message. 
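+//
+// Illustrative sketch (mypb.MyDetail is a hypothetical message type, not part
+// of this package):
+//
+//	detail := &mypb.MyDetail{}
+//	if err := apiErr.Details().ExtractProtoMessage(detail); err == nil {
+//		// detail now holds a copy of the matching message from Unknown.
+//	}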
+func (e ErrDetails) ExtractProtoMessage(v proto.Message) error { + if v == nil { + return ErrMessageNotFound + } + for _, elem := range e.Unknown { + if elemProto, ok := elem.(proto.Message); ok { + if v.ProtoReflect().Type() == elemProto.ProtoReflect().Type() { + proto.Merge(v, elemProto) + return nil + } + } + } + return ErrMessageNotFound +} + +func (e ErrDetails) String() string { + var d strings.Builder + if e.ErrorInfo != nil { + d.WriteString(fmt.Sprintf("error details: name = ErrorInfo reason = %s domain = %s metadata = %s\n", + e.ErrorInfo.GetReason(), e.ErrorInfo.GetDomain(), e.ErrorInfo.GetMetadata())) + } + + if e.BadRequest != nil { + v := e.BadRequest.GetFieldViolations() + var f []string + var desc []string + for _, x := range v { + f = append(f, x.GetField()) + desc = append(desc, x.GetDescription()) + } + d.WriteString(fmt.Sprintf("error details: name = BadRequest field = %s desc = %s\n", + strings.Join(f, " "), strings.Join(desc, " "))) + } + + if e.PreconditionFailure != nil { + v := e.PreconditionFailure.GetViolations() + var t []string + var s []string + var desc []string + for _, x := range v { + t = append(t, x.GetType()) + s = append(s, x.GetSubject()) + desc = append(desc, x.GetDescription()) + } + d.WriteString(fmt.Sprintf("error details: name = PreconditionFailure type = %s subj = %s desc = %s\n", strings.Join(t, " "), + strings.Join(s, " "), strings.Join(desc, " "))) + } + + if e.QuotaFailure != nil { + v := e.QuotaFailure.GetViolations() + var s []string + var desc []string + for _, x := range v { + s = append(s, x.GetSubject()) + desc = append(desc, x.GetDescription()) + } + d.WriteString(fmt.Sprintf("error details: name = QuotaFailure subj = %s desc = %s\n", + strings.Join(s, " "), strings.Join(desc, " "))) + } + + if e.RequestInfo != nil { + d.WriteString(fmt.Sprintf("error details: name = RequestInfo id = %s data = %s\n", + e.RequestInfo.GetRequestId(), e.RequestInfo.GetServingData())) + } + + if e.ResourceInfo != nil { + d.WriteString(fmt.Sprintf("error details: name = ResourceInfo type = %s resourcename = %s owner = %s desc = %s\n", + e.ResourceInfo.GetResourceType(), e.ResourceInfo.GetResourceName(), + e.ResourceInfo.GetOwner(), e.ResourceInfo.GetDescription())) + + } + if e.RetryInfo != nil { + d.WriteString(fmt.Sprintf("error details: retry in %s\n", e.RetryInfo.GetRetryDelay().AsDuration())) + + } + if e.Unknown != nil { + var s []string + for _, x := range e.Unknown { + s = append(s, fmt.Sprintf("%v", x)) + } + d.WriteString(fmt.Sprintf("error details: name = Unknown desc = %s\n", strings.Join(s, " "))) + } + + if e.DebugInfo != nil { + d.WriteString(fmt.Sprintf("error details: name = DebugInfo detail = %s stack = %s\n", e.DebugInfo.GetDetail(), + strings.Join(e.DebugInfo.GetStackEntries(), " "))) + } + if e.Help != nil { + var desc []string + var url []string + for _, x := range e.Help.Links { + desc = append(desc, x.GetDescription()) + url = append(url, x.GetUrl()) + } + d.WriteString(fmt.Sprintf("error details: name = Help desc = %s url = %s\n", + strings.Join(desc, " "), strings.Join(url, " "))) + } + if e.LocalizedMessage != nil { + d.WriteString(fmt.Sprintf("error details: name = LocalizedMessage locale = %s msg = %s\n", + e.LocalizedMessage.GetLocale(), e.LocalizedMessage.GetMessage())) + } + + return d.String() +} + +// APIError wraps either a gRPC Status error or a HTTP googleapi.Error. It +// implements error and Status interfaces. 
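+//
+// A typical flow (a sketch, assuming err was returned by a Google API call):
+//
+//	if ae, ok := apierror.FromError(err); ok {
+//		// Inspect machine-readable details such as the ErrorInfo reason.
+//		fmt.Println(ae.Reason(), ae.Domain(), ae.Details())
+//	}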
+type APIError struct { + err error + status *status.Status + httpErr *googleapi.Error + details ErrDetails +} + +// Details presents the error details of the APIError. +func (a *APIError) Details() ErrDetails { + return a.details +} + +// Unwrap extracts the original error. +func (a *APIError) Unwrap() error { + return a.err +} + +// Error returns a readable representation of the APIError. +func (a *APIError) Error() string { + var msg string + if a.httpErr != nil { + // Truncate the googleapi.Error message because it dumps the Details in + // an ugly way. + msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message) + } else if a.status != nil { + msg = a.err.Error() + } + return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details)) +} + +// GRPCStatus extracts the underlying gRPC Status error. +// This method is necessary to fulfill the interface +// described in https://pkg.go.dev/google.golang.org/grpc/status#FromError. +func (a *APIError) GRPCStatus() *status.Status { + return a.status +} + +// Reason returns the reason in an ErrorInfo. +// If ErrorInfo is nil, it returns an empty string. +func (a *APIError) Reason() string { + return a.details.ErrorInfo.GetReason() +} + +// Domain returns the domain in an ErrorInfo. +// If ErrorInfo is nil, it returns an empty string. +func (a *APIError) Domain() string { + return a.details.ErrorInfo.GetDomain() +} + +// Metadata returns the metadata in an ErrorInfo. +// If ErrorInfo is nil, it returns nil. +func (a *APIError) Metadata() map[string]string { + return a.details.ErrorInfo.GetMetadata() + +} + +// setDetailsFromError parses a Status error or a googleapi.Error +// and sets status and details or httpErr and details, respectively. +// It returns false if neither Status nor googleapi.Error can be parsed. +// When err is a googleapi.Error, the status of the returned error will +// be set to an Unknown error, rather than nil, since a nil code is +// interpreted as OK in the gRPC status package. +func (a *APIError) setDetailsFromError(err error) bool { + st, isStatus := status.FromError(err) + var herr *googleapi.Error + isHTTPErr := errors.As(err, &herr) + + switch { + case isStatus: + a.status = st + a.details = parseDetails(st.Details()) + case isHTTPErr: + a.httpErr = herr + a.details = parseHTTPDetails(herr) + a.status = status.New(codes.Unknown, herr.Message) + default: + return false + } + return true +} + +// FromError parses a Status error or a googleapi.Error and builds an +// APIError, wrapping the provided error in the new APIError. It +// returns false if neither Status nor googleapi.Error can be parsed. +func FromError(err error) (*APIError, bool) { + return ParseError(err, true) +} + +// ParseError parses a Status error or a googleapi.Error and builds an +// APIError. If wrap is true, it wraps the error in the new APIError. +// It returns false if neither Status nor googleapi.Error can be parsed. +func ParseError(err error, wrap bool) (*APIError, bool) { + if err == nil { + return nil, false + } + ae := APIError{} + if wrap { + ae = APIError{err: err} + } + if !ae.setDetailsFromError(err) { + return nil, false + } + return &ae, true +} + +// parseDetails accepts a slice of interface{} that should be backed by some +// sort of proto.Message that can be cast to the google/rpc/error_details.proto +// types. +// +// This is for internal use only. 
+func parseDetails(details []interface{}) ErrDetails {
+	var ed ErrDetails
+	for _, d := range details {
+		switch d := d.(type) {
+		case *errdetails.ErrorInfo:
+			ed.ErrorInfo = d
+		case *errdetails.BadRequest:
+			ed.BadRequest = d
+		case *errdetails.PreconditionFailure:
+			ed.PreconditionFailure = d
+		case *errdetails.QuotaFailure:
+			ed.QuotaFailure = d
+		case *errdetails.RetryInfo:
+			ed.RetryInfo = d
+		case *errdetails.ResourceInfo:
+			ed.ResourceInfo = d
+		case *errdetails.RequestInfo:
+			ed.RequestInfo = d
+		case *errdetails.DebugInfo:
+			ed.DebugInfo = d
+		case *errdetails.Help:
+			ed.Help = d
+		case *errdetails.LocalizedMessage:
+			ed.LocalizedMessage = d
+		default:
+			ed.Unknown = append(ed.Unknown, d)
+		}
+	}
+
+	return ed
+}
+
+// parseHTTPDetails will convert the given googleapi.Error into the protobuf
+// representation then parse the Any values that contain the error details.
+//
+// This is for internal use only.
+func parseHTTPDetails(gae *googleapi.Error) ErrDetails {
+	e := &jsonerror.Error{}
+	if err := protojson.Unmarshal([]byte(gae.Body), e); err != nil {
+		// If the error body does not conform to the error schema, ignore it
+		// altogether. See https://cloud.google.com/apis/design/errors#http_mapping.
+		return ErrDetails{}
+	}
+
+	// Coerce the Any messages into proto.Message then parse the details.
+	details := []interface{}{}
+	for _, any := range e.GetError().GetDetails() {
+		m, err := any.UnmarshalNew()
+		if err != nil {
+			// Ignore malformed Any values.
+			continue
+		}
+		details = append(details, m)
+	}
+
+	return parseDetails(details)
+}
diff --git a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md
new file mode 100644
index 000000000..9ff0caea9
--- /dev/null
+++ b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md
@@ -0,0 +1,30 @@
+# HTTP JSON Error Schema
+
+The `error.proto` represents the HTTP-JSON schema used by Google APIs to convey
+error payloads as described by https://cloud.google.com/apis/design/errors#http_mapping.
+This package is for internal parsing logic only and should not be used in any
+other context.
+
+## Regeneration
+
+To regenerate the protobuf Go code you will need the following:
+
+* A local copy of [googleapis], the absolute path to which should be exported to
+the environment variable `GOOGLEAPIS`
+* The protobuf compiler [protoc]
+* The Go [protobuf plugin]
+* The [goimports] tool
+
+From this directory run the following command:
+```sh
+protoc -I $GOOGLEAPIS -I. --go_out=. --go_opt=module=github.com/googleapis/gax-go/v2/apierror/internal/proto error.proto
+goimports -w .
+```
+
+Note: the `module` plugin option ensures the generated code is placed in this
+directory, and not in several nested directories defined by the `go_package` option.
+ +[googleapis]: https://github.com/googleapis/googleapis +[protoc]: https://github.com/protocolbuffers/protobuf#protocol-compiler-installation +[protobuf plugin]: https://developers.google.com/protocol-buffers/docs/reference/go-generated +[goimports]: https://pkg.go.dev/golang.org/x/tools/cmd/goimports \ No newline at end of file diff --git a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go new file mode 100644 index 000000000..e4b03f161 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go @@ -0,0 +1,256 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.17.3 +// source: custom_error.proto + +package jsonerror + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Error code for `CustomError`. +type CustomError_CustomErrorCode int32 + +const ( + // Default error. + CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED CustomError_CustomErrorCode = 0 + // Too many foo. + CustomError_TOO_MANY_FOO CustomError_CustomErrorCode = 1 + // Not enough foo. + CustomError_NOT_ENOUGH_FOO CustomError_CustomErrorCode = 2 + // Catastrophic error. + CustomError_UNIVERSE_WAS_DESTROYED CustomError_CustomErrorCode = 3 +) + +// Enum value maps for CustomError_CustomErrorCode. +var ( + CustomError_CustomErrorCode_name = map[int32]string{ + 0: "CUSTOM_ERROR_CODE_UNSPECIFIED", + 1: "TOO_MANY_FOO", + 2: "NOT_ENOUGH_FOO", + 3: "UNIVERSE_WAS_DESTROYED", + } + CustomError_CustomErrorCode_value = map[string]int32{ + "CUSTOM_ERROR_CODE_UNSPECIFIED": 0, + "TOO_MANY_FOO": 1, + "NOT_ENOUGH_FOO": 2, + "UNIVERSE_WAS_DESTROYED": 3, + } +) + +func (x CustomError_CustomErrorCode) Enum() *CustomError_CustomErrorCode { + p := new(CustomError_CustomErrorCode) + *p = x + return p +} + +func (x CustomError_CustomErrorCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CustomError_CustomErrorCode) Descriptor() protoreflect.EnumDescriptor { + return file_custom_error_proto_enumTypes[0].Descriptor() +} + +func (CustomError_CustomErrorCode) Type() protoreflect.EnumType { + return &file_custom_error_proto_enumTypes[0] +} + +func (x CustomError_CustomErrorCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CustomError_CustomErrorCode.Descriptor instead. 
+func (CustomError_CustomErrorCode) EnumDescriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0, 0} +} + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +type CustomError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error code specific to the custom API being invoked. + Code CustomError_CustomErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=error.CustomError_CustomErrorCode" json:"code,omitempty"` + // Name of the failed entity. + Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` + // Message that describes the error. + ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *CustomError) Reset() { + *x = CustomError{} + if protoimpl.UnsafeEnabled { + mi := &file_custom_error_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomError) ProtoMessage() {} + +func (x *CustomError) ProtoReflect() protoreflect.Message { + mi := &file_custom_error_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomError.ProtoReflect.Descriptor instead. +func (*CustomError) Descriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0} +} + +func (x *CustomError) GetCode() CustomError_CustomErrorCode { + if x != nil { + return x.Code + } + return CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED +} + +func (x *CustomError) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *CustomError) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_custom_error_proto protoreflect.FileDescriptor + +var file_custom_error_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x0b, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x76, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, + 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 
0x4f, 0x4f, 0x5f, 0x4d, 0x41, + 0x4e, 0x59, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x54, 0x5f, + 0x45, 0x4e, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, + 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x57, 0x41, 0x53, 0x5f, 0x44, 0x45, 0x53, + 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_custom_error_proto_rawDescOnce sync.Once + file_custom_error_proto_rawDescData = file_custom_error_proto_rawDesc +) + +func file_custom_error_proto_rawDescGZIP() []byte { + file_custom_error_proto_rawDescOnce.Do(func() { + file_custom_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_custom_error_proto_rawDescData) + }) + return file_custom_error_proto_rawDescData +} + +var file_custom_error_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_custom_error_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_custom_error_proto_goTypes = []interface{}{ + (CustomError_CustomErrorCode)(0), // 0: error.CustomError.CustomErrorCode + (*CustomError)(nil), // 1: error.CustomError +} +var file_custom_error_proto_depIdxs = []int32{ + 0, // 0: error.CustomError.code:type_name -> error.CustomError.CustomErrorCode + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_custom_error_proto_init() } +func file_custom_error_proto_init() { + if File_custom_error_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_custom_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_custom_error_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_custom_error_proto_goTypes, + DependencyIndexes: file_custom_error_proto_depIdxs, + EnumInfos: file_custom_error_proto_enumTypes, + MessageInfos: file_custom_error_proto_msgTypes, + }.Build() + File_custom_error_proto = out.File + file_custom_error_proto_rawDesc = nil + file_custom_error_proto_goTypes = nil + file_custom_error_proto_depIdxs = nil +} diff --git a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto new file mode 100644 index 000000000..21678ae65 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto @@ -0,0 +1,50 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package error; + +option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; + + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +message CustomError { + + // Error code for `CustomError`. + enum CustomErrorCode { + // Default error. + CUSTOM_ERROR_CODE_UNSPECIFIED = 0; + + // Too many foo. + TOO_MANY_FOO = 1; + + // Not enough foo. + NOT_ENOUGH_FOO = 2; + + // Catastrophic error. + UNIVERSE_WAS_DESTROYED = 3; + + } + + // Error code specific to the custom API being invoked. + CustomErrorCode code = 1; + + // Name of the failed entity. + string entity = 2; + + // Message that describes the error. + string error_message = 3; +} diff --git a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go new file mode 100644 index 000000000..7dd9b8373 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go @@ -0,0 +1,280 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.15.8 +// source: apierror/internal/proto/error.proto + +package jsonerror + +import ( + reflect "reflect" + sync "sync" + + code "google.golang.org/genproto/googleapis/rpc/code" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The error format v2 for Google JSON REST APIs. +// Copied from https://cloud.google.com/apis/design/errors#http_mapping. +// +// NOTE: This schema is not used for other wire protocols. +type Error struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The actual error payload. The nested message structure is for backward + // compatibility with Google API client libraries. It also makes the error + // more readable to developers. 
+ Error *Error_Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *Error) Reset() { + *x = Error{} + if protoimpl.UnsafeEnabled { + mi := &file_apierror_internal_proto_error_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Error) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Error) ProtoMessage() {} + +func (x *Error) ProtoReflect() protoreflect.Message { + mi := &file_apierror_internal_proto_error_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Error.ProtoReflect.Descriptor instead. +func (*Error) Descriptor() ([]byte, []int) { + return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0} +} + +func (x *Error) GetError() *Error_Status { + if x != nil { + return x.Error + } + return nil +} + +// This message has the same semantics as `google.rpc.Status`. It uses HTTP +// status code instead of gRPC status code. It has an extra field `status` +// for backward compatibility with Google API Client Libraries. +type Error_Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP status code that corresponds to `google.rpc.Status.code`. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // This corresponds to `google.rpc.Status.message`. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // This is the enum version for `google.rpc.Status.code`. + Status code.Code `protobuf:"varint,4,opt,name=status,proto3,enum=google.rpc.Code" json:"status,omitempty"` + // This corresponds to `google.rpc.Status.details`. + Details []*anypb.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *Error_Status) Reset() { + *x = Error_Status{} + if protoimpl.UnsafeEnabled { + mi := &file_apierror_internal_proto_error_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Error_Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Error_Status) ProtoMessage() {} + +func (x *Error_Status) ProtoReflect() protoreflect.Message { + mi := &file_apierror_internal_proto_error_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Error_Status.ProtoReflect.Descriptor instead. 
+func (*Error_Status) Descriptor() ([]byte, []int) { + return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Error_Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Error_Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Error_Status) GetStatus() code.Code { + if x != nil { + return x.Status + } + return code.Code(0) +} + +func (x *Error_Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_apierror_internal_proto_error_proto protoreflect.FileDescriptor + +var file_apierror_internal_proto_error_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, + 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, + 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_apierror_internal_proto_error_proto_rawDescOnce sync.Once + file_apierror_internal_proto_error_proto_rawDescData = file_apierror_internal_proto_error_proto_rawDesc +) + +func file_apierror_internal_proto_error_proto_rawDescGZIP() []byte { + file_apierror_internal_proto_error_proto_rawDescOnce.Do(func() { + file_apierror_internal_proto_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_apierror_internal_proto_error_proto_rawDescData) + }) + return file_apierror_internal_proto_error_proto_rawDescData +} + +var file_apierror_internal_proto_error_proto_msgTypes 
= make([]protoimpl.MessageInfo, 2) +var file_apierror_internal_proto_error_proto_goTypes = []interface{}{ + (*Error)(nil), // 0: error.Error + (*Error_Status)(nil), // 1: error.Error.Status + (code.Code)(0), // 2: google.rpc.Code + (*anypb.Any)(nil), // 3: google.protobuf.Any +} +var file_apierror_internal_proto_error_proto_depIdxs = []int32{ + 1, // 0: error.Error.error:type_name -> error.Error.Status + 2, // 1: error.Error.Status.status:type_name -> google.rpc.Code + 3, // 2: error.Error.Status.details:type_name -> google.protobuf.Any + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_apierror_internal_proto_error_proto_init() } +func file_apierror_internal_proto_error_proto_init() { + if File_apierror_internal_proto_error_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_apierror_internal_proto_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Error); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apierror_internal_proto_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Error_Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_apierror_internal_proto_error_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_apierror_internal_proto_error_proto_goTypes, + DependencyIndexes: file_apierror_internal_proto_error_proto_depIdxs, + MessageInfos: file_apierror_internal_proto_error_proto_msgTypes, + }.Build() + File_apierror_internal_proto_error_proto = out.File + file_apierror_internal_proto_error_proto_rawDesc = nil + file_apierror_internal_proto_error_proto_goTypes = nil + file_apierror_internal_proto_error_proto_depIdxs = nil +} diff --git a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto new file mode 100644 index 000000000..4b9b13ce1 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto @@ -0,0 +1,46 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package error; + +import "google/protobuf/any.proto"; +import "google/rpc/code.proto"; + +option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; + +// The error format v2 for Google JSON REST APIs. 
+// Copied from https://cloud.google.com/apis/design/errors#http_mapping. +// +// NOTE: This schema is not used for other wire protocols. +message Error { + // This message has the same semantics as `google.rpc.Status`. It uses HTTP + // status code instead of gRPC status code. It has an extra field `status` + // for backward compatibility with Google API Client Libraries. + message Status { + // The HTTP status code that corresponds to `google.rpc.Status.code`. + int32 code = 1; + // This corresponds to `google.rpc.Status.message`. + string message = 2; + // This is the enum version for `google.rpc.Status.code`. + google.rpc.Code status = 4; + // This corresponds to `google.rpc.Status.details`. + repeated google.protobuf.Any details = 5; + } + // The actual error payload. The nested message structure is for backward + // compatibility with Google API client libraries. It also makes the error + // more readable to developers. + Status error = 1; +} diff --git a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/call_option.go b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/call_option.go index b1d53dd19..e09200556 100644 --- a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/call_option.go +++ b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/call_option.go @@ -30,9 +30,11 @@ package gax import ( + "errors" "math/rand" "time" + "google.golang.org/api/googleapi" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -47,7 +49,7 @@ type CallOption interface { // Retryer is used by Invoke to determine retry behavior. type Retryer interface { - // Retry reports whether a request should be retriedand how long to pause before retrying + // Retry reports whether a request should be retried and how long to pause before retrying // if the previous attempt returned with err. Invoke never calls Retry with nil error. Retry(err error) (pause time.Duration, shouldRetry bool) } @@ -63,6 +65,31 @@ func WithRetry(fn func() Retryer) CallOption { return retryerOption(fn) } +// OnErrorFunc returns a Retryer that retries if and only if the previous attempt +// returns an error that satisfies shouldRetry. +// +// Pause times between retries are specified by bo. bo is only used for its +// parameters; each Retryer has its own copy. +func OnErrorFunc(bo Backoff, shouldRetry func(err error) bool) Retryer { + return &errorRetryer{ + shouldRetry: shouldRetry, + backoff: bo, + } +} + +type errorRetryer struct { + backoff Backoff + shouldRetry func(err error) bool +} + +func (r *errorRetryer) Retry(err error) (time.Duration, bool) { + if r.shouldRetry(err) { + return r.backoff.Pause(), true + } + + return 0, false +} + // OnCodes returns a Retryer that retries if and only if // the previous attempt returns a GRPC error whose error code is stored in cc. // Pause times between retries are specified by bo. @@ -94,22 +121,60 @@ func (r *boRetryer) Retry(err error) (time.Duration, bool) { return 0, false } -// Backoff implements exponential backoff. -// The wait time between retries is a random value between 0 and the "retry envelope". -// The envelope starts at Initial and increases by the factor of Multiplier every retry, -// but is capped at Max. +// OnHTTPCodes returns a Retryer that retries if and only if +// the previous attempt returns a googleapi.Error whose status code is stored in +// cc. Pause times between retries are specified by bo. +// +// bo is only used for its parameters; each Retryer has its own copy. 
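+//
+// For example (an illustrative sketch):
+//
+//	retryer := gax.OnHTTPCodes(gax.Backoff{
+//		Initial: 100 * time.Millisecond,
+//		Max:     30 * time.Second,
+//	}, http.StatusServiceUnavailable, http.StatusTooManyRequests)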
+func OnHTTPCodes(bo Backoff, cc ...int) Retryer {
+	codes := make(map[int]bool, len(cc))
+	for _, c := range cc {
+		codes[c] = true
+	}
+
+	return &httpRetryer{
+		backoff: bo,
+		codes:   codes,
+	}
+}
+
+type httpRetryer struct {
+	backoff Backoff
+	codes   map[int]bool
+}
+
+func (r *httpRetryer) Retry(err error) (time.Duration, bool) {
+	var gerr *googleapi.Error
+	if !errors.As(err, &gerr) {
+		return 0, false
+	}
+
+	if r.codes[gerr.Code] {
+		return r.backoff.Pause(), true
+	}
+
+	return 0, false
+}
+
+// Backoff implements exponential backoff. The wait time between retries is a
+// random value between 0 and the "retry period", i.e. the current upper bound
+// on the pause. The retry period starts at Initial and increases by the factor
+// of Multiplier every retry, but is capped at Max.
+//
+// Note: MaxNumRetries / RPCDeadline are specifically not provided. These should
+// be built on top of Backoff.
 type Backoff struct {
-	// Initial is the initial value of the retry envelope, defaults to 1 second.
+	// Initial is the initial value of the retry period, defaults to 1 second.
 	Initial time.Duration
 
-	// Max is the maximum value of the retry envelope, defaults to 30 seconds.
+	// Max is the maximum value of the retry period, defaults to 30 seconds.
 	Max time.Duration
 
-	// Multiplier is the factor by which the retry envelope increases.
+	// Multiplier is the factor by which the retry period increases.
 	// It should be greater than 1 and defaults to 2.
 	Multiplier float64
 
-	// cur is the current retry envelope
+	// cur is the current retry period.
 	cur time.Duration
 }
 
@@ -145,6 +210,21 @@ func (o grpcOpt) Resolve(s *CallSettings) {
 	s.GRPC = o
 }
 
+type pathOpt struct {
+	p string
+}
+
+func (p pathOpt) Resolve(s *CallSettings) {
+	s.Path = p.p
+}
+
+// WithPath applies a Path override to the HTTP-based APICall.
+//
+// This is for internal use only.
+func WithPath(p string) CallOption {
+	return &pathOpt{p: p}
+}
+
 // WithGRPCOptions allows passing gRPC call options during client creation.
 func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
 	return grpcOpt(append([]grpc.CallOption(nil), opt...))
@@ -158,4 +238,7 @@ type CallSettings struct {
 
 	// CallOptions to be forwarded to GRPC.
 	GRPC []grpc.CallOption
+
+	// Path is an HTTP override for an APICall.
+	Path string
 }
diff --git a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/content_type.go b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/content_type.go
new file mode 100644
index 000000000..1b53d0a3a
--- /dev/null
+++ b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/content_type.go
@@ -0,0 +1,112 @@
+// Copyright 2022, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+const sniffBuffSize = 512
+
+func newContentSniffer(r io.Reader) *contentSniffer {
+	return &contentSniffer{r: r}
+}
+
+// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
+type contentSniffer struct {
+	r     io.Reader
+	start []byte // buffer for the sniffed bytes.
+	err   error  // set to any error encountered while reading bytes to be sniffed.
+
+	ctype   string // set on first sniff.
+	sniffed bool   // set to true on first sniff.
+}
+
+func (cs *contentSniffer) Read(p []byte) (n int, err error) {
+	// Ensure that the content type is sniffed before any data is consumed from Reader.
+	_, _ = cs.ContentType()
+
+	if len(cs.start) > 0 {
+		n := copy(p, cs.start)
+		cs.start = cs.start[n:]
+		return n, nil
+	}
+
+	// We may have read some bytes into start while sniffing, even if the read ended in an error.
+	// We should first return those bytes, then the error.
+	if cs.err != nil {
+		return 0, cs.err
+	}
+
+	// Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
+	return cs.r.Read(p)
+}
+
+// ContentType returns the sniffed content type, and whether the content type was successfully sniffed.
+func (cs *contentSniffer) ContentType() (string, bool) {
+	if cs.sniffed {
+		return cs.ctype, cs.ctype != ""
+	}
+	cs.sniffed = true
+	// If ReadAll hits EOF, it returns err==nil.
+	cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))
+
+	// Don't try to detect the content type based on possibly incomplete data.
+	if cs.err != nil {
+		return "", false
+	}
+
+	cs.ctype = http.DetectContentType(cs.start)
+	return cs.ctype, true
+}
+
+// DetermineContentType determines the content type of the supplied reader.
+// The content of media will be sniffed to determine the content type.
+// After calling DetermineContentType the caller must not perform further reads on
+// media, but rather read from the Reader that is returned.
+func DetermineContentType(media io.Reader) (io.Reader, string) {
+	// For backwards compatibility, allow clients to set content
+	// type by providing a ContentTyper for media.
+	// Note: This is an anonymous interface definition copied from googleapi.ContentTyper.
+	if typer, ok := media.(interface {
+		ContentType() string
+	}); ok {
+		return media, typer.ContentType()
+	}
+
+	sniffer := newContentSniffer(media)
+	if ctype, ok := sniffer.ContentType(); ok {
+		return sniffer, ctype
+	}
+	// If content type could not be sniffed, reads from sniffer will eventually fail with an error.
+ return sniffer, "" +} diff --git a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/gax.go b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/gax.go index 3fd1b0b84..36cdfa33e 100644 --- a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/gax.go +++ b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/gax.go @@ -35,5 +35,7 @@ // to simplify code generation and to provide more convenient and idiomatic API surfaces. package gax +import "github.com/googleapis/gax-go/v2/internal" + // Version specifies the gax-go version being used. -const Version = "2.0.4" +const Version = internal.Version diff --git a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/internal/version.go new file mode 100644 index 000000000..936873ec4 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -0,0 +1,33 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package internal + +// Version is the current tagged release of the library. +const Version = "2.7.1" diff --git a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/invoke.go b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/invoke.go index fe31dd004..9fcc29959 100644 --- a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/invoke.go +++ b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/invoke.go @@ -33,13 +33,15 @@ import ( "context" "strings" "time" + + "github.com/googleapis/gax-go/v2/apierror" ) // APICall is a user defined call stub. type APICall func(context.Context, CallSettings) error -// Invoke calls the given APICall, -// performing retries as specified by opts, if any. +// Invoke calls the given APICall, performing retries as specified by opts, if +// any. 
 func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
 	var settings CallSettings
 	for _, opt := range opts {
@@ -71,9 +73,6 @@ func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper
 	if err == nil {
 		return nil
 	}
-	if settings.Retry == nil {
-		return err
-	}
 	// Never retry permanent certificate errors. (e.x. if ca-certificates
 	// are not installed). We should only make very few, targeted
 	// exceptions: many (other) status=Unavailable should be retried, such
@@ -83,6 +82,12 @@ func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper
 	if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") {
 		return err
 	}
+	if apierr, ok := apierror.FromError(err); ok {
+		err = apierr
+	}
+	if settings.Retry == nil {
+		return err
+	}
 	if retryer == nil {
 		if r := settings.Retry(); r != nil {
 			retryer = r
diff --git a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go
new file mode 100644
index 000000000..cc4486eb9
--- /dev/null
+++ b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go
@@ -0,0 +1,126 @@
+// Copyright 2022, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+	"encoding/json"
+	"errors"
+	"io"
+
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+var (
+	arrayOpen     = json.Delim('[')
+	arrayClose    = json.Delim(']')
+	errBadOpening = errors.New("unexpected opening token, expected '['")
+)
+
+// ProtoJSONStream represents a wrapper for consuming a stream of protobuf
+// messages encoded using protobuf-JSON format. More information on this format
+// can be found at https://developers.google.com/protocol-buffers/docs/proto3#json.
+// The stream must appear as a comma-delimited JSON array of objects with
+// opening and closing square brackets.
+//
+// This is for internal use only.
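// A consumption sketch for ProtoJSONStream; the response body and message
// type are assumptions for illustration (structpb is
// google.golang.org/protobuf/types/known/structpb):
//
//	stream := gax.NewProtoJSONStreamReader(resp.Body, (&structpb.Struct{}).ProtoReflect().Type())
//	defer stream.Close()
//	for {
//		msg, err := stream.Recv()
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		st := msg.(*structpb.Struct) // cast to the type given at creation
//		_ = st
//	}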
+type ProtoJSONStream struct { + first, closed bool + reader io.ReadCloser + stream *json.Decoder + typ protoreflect.MessageType +} + +// NewProtoJSONStreamReader accepts a stream of bytes via an io.ReadCloser that are +// protobuf-JSON encoded protobuf messages of the given type. The ProtoJSONStream +// must be closed when done. +// +// This is for internal use only. +func NewProtoJSONStreamReader(rc io.ReadCloser, typ protoreflect.MessageType) *ProtoJSONStream { + return &ProtoJSONStream{ + first: true, + reader: rc, + stream: json.NewDecoder(rc), + typ: typ, + } +} + +// Recv decodes the next protobuf message in the stream or returns io.EOF if +// the stream is done. It is not safe to call Recv on the same stream from +// different goroutines, just like it is not safe to do so with a single gRPC +// stream. Type-cast the protobuf message returned to the type provided at +// ProtoJSONStream creation. +// Calls to Recv after calling Close will produce io.EOF. +func (s *ProtoJSONStream) Recv() (proto.Message, error) { + if s.closed { + return nil, io.EOF + } + if s.first { + s.first = false + + // Consume the opening '[' so Decode gets one object at a time. + if t, err := s.stream.Token(); err != nil { + return nil, err + } else if t != arrayOpen { + return nil, errBadOpening + } + } + + // Capture the next block of data for the item (a JSON object) in the stream. + var raw json.RawMessage + if err := s.stream.Decode(&raw); err != nil { + e := err + // To avoid checking the first token of each stream, just attempt to + // Decode the next blob and if that fails, double check if it is just + // the closing token ']'. If it is the closing, return io.EOF. If it + // isn't, return the original error. + if t, _ := s.stream.Token(); t == arrayClose { + e = io.EOF + } + return nil, e + } + + // Initialize a new instance of the protobuf message to unmarshal the + // raw data into. + m := s.typ.New().Interface() + err := protojson.Unmarshal(raw, m) + + return m, err +} + +// Close closes the stream so that resources are cleaned up. +func (s *ProtoJSONStream) Close() error { + // Dereference the *json.Decoder so that the memory is gc'd. 
+ s.stream = nil + s.closed = true + + return s.reader.Close() +} diff --git a/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/release-please-config.json b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/release-please-config.json new file mode 100644 index 000000000..61ee266a1 --- /dev/null +++ b/prometheus-to-sd/vendor/github.com/googleapis/gax-go/v2/release-please-config.json @@ -0,0 +1,10 @@ +{ + "release-type": "go-yoshi", + "separate-pull-requests": true, + "include-component-in-tag": false, + "packages": { + "v2": { + "component": "v2" + } + } +} diff --git a/prometheus-to-sd/vendor/go.opencensus.io/.travis.yml b/prometheus-to-sd/vendor/go.opencensus.io/.travis.yml deleted file mode 100644 index bd6b66ee8..000000000 --- a/prometheus-to-sd/vendor/go.opencensus.io/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go - -go_import_path: go.opencensus.io - -go: - - 1.11.x - -env: - global: - GO111MODULE=on - -before_script: - - make install-tools - -script: - - make travis-ci - - go run internal/check/version.go # TODO move this to makefile diff --git a/prometheus-to-sd/vendor/go.opencensus.io/Makefile b/prometheus-to-sd/vendor/go.opencensus.io/Makefile index b3ce3df30..d896edc99 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/Makefile +++ b/prometheus-to-sd/vendor/go.opencensus.io/Makefile @@ -91,7 +91,7 @@ embedmd: .PHONY: install-tools install-tools: - go get -u golang.org/x/lint/golint - go get -u golang.org/x/tools/cmd/cover - go get -u golang.org/x/tools/cmd/goimports - go get -u github.com/rakyll/embedmd + go install golang.org/x/lint/golint@latest + go install golang.org/x/tools/cmd/cover@latest + go install golang.org/x/tools/cmd/goimports@latest + go install github.com/rakyll/embedmd@latest diff --git a/prometheus-to-sd/vendor/go.opencensus.io/opencensus.go b/prometheus-to-sd/vendor/go.opencensus.io/opencensus.go index e5e4b4368..11e31f421 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/opencensus.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { - return "0.23.0" + return "0.24.0" } diff --git a/prometheus-to-sd/vendor/go.opencensus.io/plugin/ochttp/server.go b/prometheus-to-sd/vendor/go.opencensus.io/plugin/ochttp/server.go index c7ea64235..f7c8434be 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -31,14 +31,14 @@ import ( // Handler is an http.Handler wrapper to instrument your HTTP server with // OpenCensus. It supports both stats and tracing. // -// Tracing +// # Tracing // // This handler is aware of the incoming request's span, reading it from request // headers as configured using the Propagation field. // The extracted span can be accessed from the incoming request's // context. // -// span := trace.FromContext(r.Context()) +// span := trace.FromContext(r.Context()) // // The server span will be automatically ended at the end of ServeHTTP. type Handler struct { @@ -224,7 +224,9 @@ func (t *trackingResponseWriter) WriteHeader(statusCode int) { } // wrappedResponseWriter returns a wrapped version of the original -// ResponseWriter and only implements the same combination of additional +// +// ResponseWriter and only implements the same combination of additional +// // interfaces as the original. // This implementation is based on https://github.com/felixge/httpsnoop. 
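// For reference, the Handler documented in this file is installed by wrapping
// an existing http.Handler; the mux and address below are illustrative
// assumptions:
//
//	srv := &http.Server{
//		Addr:    ":8080",
//		Handler: &ochttp.Handler{Handler: mux},
//	}
//	log.Fatal(srv.ListenAndServe())
//
// Each request handled this way carries its server span in r.Context(), as
// shown with trace.FromContext in the Handler doc comment above.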
func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { diff --git a/prometheus-to-sd/vendor/go.opencensus.io/stats/doc.go b/prometheus-to-sd/vendor/go.opencensus.io/stats/doc.go index 00d473ee0..31477a464 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/stats/doc.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/stats/doc.go @@ -19,7 +19,7 @@ Package stats contains support for OpenCensus stats recording. OpenCensus allows users to create typed measures, record measurements, aggregate the collected data, and export the aggregated data. -Measures +# Measures A measure represents a type of data point to be tracked and recorded. For example, latency, request Mb/s, and response Mb/s are measures @@ -33,7 +33,7 @@ Libraries can define and export measures. Application authors can then create views and collect and break down measures by the tags they are interested in. -Recording measurements +# Recording measurements Measurement is a data point to be collected for a measure. For example, for a latency (ms) measure, 100 is a measurement that represents a 100ms @@ -49,7 +49,7 @@ Libraries can always record measurements, and applications can later decide on which measurements they want to collect by registering views. This allows libraries to turn on the instrumentation by default. -Exemplars +# Exemplars For a given recorded measurement, the associated exemplar is a diagnostic map that gives more information about the measurement. @@ -64,6 +64,5 @@ then the trace span will be added to the exemplar associated with the measuremen When exported to a supporting back end, you should be able to easily navigate to example traces that fell into each bucket in the Distribution. - */ package stats // import "go.opencensus.io/stats" diff --git a/prometheus-to-sd/vendor/go.opencensus.io/stats/internal/record.go b/prometheus-to-sd/vendor/go.opencensus.io/stats/internal/record.go index 36935e629..436dc791f 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/stats/internal/record.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/stats/internal/record.go @@ -21,5 +21,11 @@ import ( // DefaultRecorder will be called for each Record call. var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) +// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but +// avoids interface{} conversion. +// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type, +// but is interface{} here to avoid import loops +var MeasurementRecorder interface{} + // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/prometheus-to-sd/vendor/go.opencensus.io/stats/record.go b/prometheus-to-sd/vendor/go.opencensus.io/stats/record.go index 2b9728346..8b5b99803 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/stats/record.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/stats/record.go @@ -86,10 +86,29 @@ func createRecordOption(ros ...Options) *recordOptions { return o } +type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) + // Record records one or multiple measurements with the same context at once. // If there are any tags in the context, measurements will be tagged with them. 
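// A minimal recording sketch for the Record fast path below; the measure
// name, unit and value are illustrative assumptions:
//
//	latencyMs := stats.Float64("example.com/latency", "RPC latency", "ms")
//	stats.Record(ctx, latencyMs.M(112.5))
//
// Note that the optimized path only forwards to the recorder when at least
// one registered view subscribes to one of the measures.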
func Record(ctx context.Context, ms ...Measurement) { - RecordWithOptions(ctx, WithMeasurements(ms...)) + // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality + // (RecordOptions) we can reduce some allocations to speed up this hot path + if len(ms) == 0 { + return + } + recorder := internal.MeasurementRecorder.(measurementRecorder) + record := false + for _, m := range ms { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return + } + recorder(tag.FromContext(ctx), ms, nil) + return } // RecordWithTags records one or multiple measurements at once. diff --git a/prometheus-to-sd/vendor/go.opencensus.io/stats/view/aggregation.go b/prometheus-to-sd/vendor/go.opencensus.io/stats/view/aggregation.go index 9d7093728..61f72d20d 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/stats/view/aggregation.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/stats/view/aggregation.go @@ -15,6 +15,8 @@ package view +import "time" + // AggType represents the type of aggregation function used on a View. type AggType int @@ -45,20 +47,20 @@ type Aggregation struct { Type AggType // Type is the AggType of this Aggregation. Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. - newData func() AggregationData + newData func(time.Time) AggregationData } var ( aggCount = &Aggregation{ Type: AggTypeCount, - newData: func() AggregationData { - return &CountData{} + newData: func(t time.Time) AggregationData { + return &CountData{Start: t} }, } aggSum = &Aggregation{ Type: AggTypeSum, - newData: func() AggregationData { - return &SumData{} + newData: func(t time.Time) AggregationData { + return &SumData{Start: t} }, } ) @@ -88,9 +90,9 @@ func Sum() *Aggregation { // // If len(bounds) >= 2 then the boundaries for bucket index i are: // -// [-infinity, bounds[i]) for i = 0 -// [bounds[i-1], bounds[i]) for 0 < i < length -// [bounds[i-1], +infinity) for i = length +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length // // If len(bounds) is 0 then there is no histogram associated with the // distribution. There will be a single bucket with boundaries @@ -103,8 +105,8 @@ func Distribution(bounds ...float64) *Aggregation { Type: AggTypeDistribution, Buckets: bounds, } - agg.newData = func() AggregationData { - return newDistributionData(agg) + agg.newData = func(t time.Time) AggregationData { + return newDistributionData(agg, t) } return agg } @@ -114,7 +116,7 @@ func Distribution(bounds ...float64) *Aggregation { func LastValue() *Aggregation { return &Aggregation{ Type: AggTypeLastValue, - newData: func() AggregationData { + newData: func(_ time.Time) AggregationData { return &LastValueData{} }, } diff --git a/prometheus-to-sd/vendor/go.opencensus.io/stats/view/aggregation_data.go b/prometheus-to-sd/vendor/go.opencensus.io/stats/view/aggregation_data.go index f331d456e..d93b52066 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -31,6 +31,7 @@ type AggregationData interface { clone() AggregationData equal(other AggregationData) bool toPoint(t metricdata.Type, time time.Time) metricdata.Point + StartTime() time.Time } const epsilon = 1e-9 @@ -40,6 +41,7 @@ const epsilon = 1e-9 // // Most users won't directly access count data. 
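// Worked example of the Distribution bucket boundaries documented in
// aggregation.go above: view.Distribution(10, 100) produces three buckets,
//
//	[-infinity, 10), [10, 100), [100, +infinity)
//
// so a recorded value of 9.9 falls in the first bucket and a value of exactly
// 100 falls in the last (lower bounds inclusive, upper bounds exclusive).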
type CountData struct { + Start time.Time Value int64 } @@ -50,7 +52,7 @@ func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) } func (a *CountData) clone() AggregationData { - return &CountData{Value: a.Value} + return &CountData{Value: a.Value, Start: a.Start} } func (a *CountData) equal(other AggregationData) bool { @@ -59,7 +61,7 @@ func (a *CountData) equal(other AggregationData) bool { return false } - return a.Value == a2.Value + return a.Start.Equal(a2.Start) && a.Value == a2.Value } func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { @@ -71,11 +73,17 @@ func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata. } } +// StartTime returns the start time of the data being aggregated by CountData. +func (a *CountData) StartTime() time.Time { + return a.Start +} + // SumData is the aggregated data for the Sum aggregation. // A sum aggregation processes data and sums up the recordings. // // Most users won't directly access sum data. type SumData struct { + Start time.Time Value float64 } @@ -86,7 +94,7 @@ func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { } func (a *SumData) clone() AggregationData { - return &SumData{Value: a.Value} + return &SumData{Value: a.Value, Start: a.Start} } func (a *SumData) equal(other AggregationData) bool { @@ -94,7 +102,7 @@ func (a *SumData) equal(other AggregationData) bool { if !ok { return false } - return math.Pow(a.Value-a2.Value, 2) < epsilon + return a.Start.Equal(a2.Start) && math.Pow(a.Value-a2.Value, 2) < epsilon } func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { @@ -108,6 +116,11 @@ func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Po } } +// StartTime returns the start time of the data being aggregated by SumData. +func (a *SumData) StartTime() time.Time { + return a.Start +} + // DistributionData is the aggregated data for the // Distribution aggregation. // @@ -126,9 +139,10 @@ type DistributionData struct { // an exemplar for the associated bucket, or nil. ExemplarsPerBucket []*metricdata.Exemplar bounds []float64 // histogram distribution of the values + Start time.Time } -func newDistributionData(agg *Aggregation) *DistributionData { +func newDistributionData(agg *Aggregation, t time.Time) *DistributionData { bucketCount := len(agg.Buckets) + 1 return &DistributionData{ CountPerBucket: make([]int64, bucketCount), @@ -136,6 +150,7 @@ func newDistributionData(agg *Aggregation) *DistributionData { bounds: agg.Buckets, Min: math.MaxFloat64, Max: math.SmallestNonzeroFloat64, + Start: t, } } @@ -226,7 +241,11 @@ func (a *DistributionData) equal(other AggregationData) bool { return false } } - return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon + return a.Start.Equal(a2.Start) && + a.Count == a2.Count && + a.Min == a2.Min && + a.Max == a2.Max && + math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon } func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { @@ -256,6 +275,11 @@ func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metr } } +// StartTime returns the start time of the data being aggregated by DistributionData. 
+func (a *DistributionData) StartTime() time.Time { + return a.Start +} + // LastValueData returns the last value recorded for LastValue aggregation. type LastValueData struct { Value float64 @@ -291,3 +315,22 @@ func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricd panic("unsupported metricdata.Type") } } + +// StartTime returns an empty time value as start time is not recorded when using last value +// aggregation. +func (l *LastValueData) StartTime() time.Time { + return time.Time{} +} + +// ClearStart clears the Start field from data if present. Useful for testing in cases where the +// start time will be nondeterministic. +func ClearStart(data AggregationData) { + switch data := data.(type) { + case *CountData: + data.Start = time.Time{} + case *SumData: + data.Start = time.Time{} + case *DistributionData: + data.Start = time.Time{} + } +} diff --git a/prometheus-to-sd/vendor/go.opencensus.io/stats/view/collector.go b/prometheus-to-sd/vendor/go.opencensus.io/stats/view/collector.go index 8a6a2c0fd..bcd6e08c7 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/stats/view/collector.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/stats/view/collector.go @@ -35,7 +35,7 @@ type collector struct { func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { aggregator, ok := c.signatures[s] if !ok { - aggregator = c.a.newData() + aggregator = c.a.newData(t) c.signatures[s] = aggregator } aggregator.addSample(v, attachments, t) @@ -59,8 +59,15 @@ func (c *collector) clearRows() { // encodeWithKeys encodes the map by using values // only associated with the keys provided. func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + // Compute the buffer length we will need ahead of time to avoid resizing later + reqLen := 0 + for _, k := range keys { + s, _ := m.Value(k) + // We will store each key + its length + reqLen += len(s) + 1 + } vb := &tagencoding.Values{ - Buffer: make([]byte, len(keys)), + Buffer: make([]byte, reqLen), } for _, k := range keys { v, _ := m.Value(k) diff --git a/prometheus-to-sd/vendor/go.opencensus.io/stats/view/doc.go b/prometheus-to-sd/vendor/go.opencensus.io/stats/view/doc.go index 7bbedfe1f..60bf0e392 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/stats/view/doc.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/stats/view/doc.go @@ -34,7 +34,7 @@ // Libraries can define views but it is recommended that in most cases registering // views be left up to applications. // -// Exporting +// # Exporting // // Collected and aggregated data can be exported to a metric collection // backend by registering its exporter. 
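// A sketch of the view registration flow described above, including the new
// ClearStart helper from aggregation_data.go; the view name, measure and
// buckets are illustrative assumptions:
//
//	v := &view.View{
//		Name:        "example.com/latency",
//		Measure:     latencyMs, // a stats measure defined elsewhere
//		Aggregation: view.Distribution(10, 100),
//	}
//	if err := view.Register(v); err != nil {
//		log.Fatal(err)
//	}
//	rows, _ := view.RetrieveData(v.Name)
//	for _, row := range rows {
//		view.ClearStart(row.Data) // normalize nondeterministic start times in tests
//	}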
diff --git a/prometheus-to-sd/vendor/go.opencensus.io/stats/view/view_to_metric.go b/prometheus-to-sd/vendor/go.opencensus.io/stats/view/view_to_metric.go index 5e1656a1f..57d615ec7 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/stats/view/view_to_metric.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/stats/view/view_to_metric.go @@ -119,20 +119,15 @@ func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.La return labelValues } -func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries { +func rowToTimeseries(v *viewInternal, row *Row, now time.Time) *metricdata.TimeSeries { return &metricdata.TimeSeries{ Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), - StartTime: startTime, + StartTime: row.Data.StartTime(), } } -func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time, startTime time.Time) *metricdata.Metric { - if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || - v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { - startTime = time.Time{} - } - +func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time) *metricdata.Metric { rows := v.collectedRows() if len(rows) == 0 { return nil @@ -140,7 +135,7 @@ func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time, startTim ts := []*metricdata.TimeSeries{} for _, row := range rows { - ts = append(ts, rowToTimeseries(v, row, now, startTime)) + ts = append(ts, rowToTimeseries(v, row, now)) } m := &metricdata.Metric{ diff --git a/prometheus-to-sd/vendor/go.opencensus.io/stats/view/worker.go b/prometheus-to-sd/vendor/go.opencensus.io/stats/view/worker.go index ab8bfd46d..6a79cd8a3 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/stats/view/worker.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/stats/view/worker.go @@ -33,6 +33,7 @@ func init() { defaultWorker = NewMeter().(*worker) go defaultWorker.start() internal.DefaultRecorder = record + internal.MeasurementRecorder = recordMeasurement } type measureRef struct { @@ -41,9 +42,9 @@ type measureRef struct { } type worker struct { - measures map[string]*measureRef - views map[string]*viewInternal - startTimes map[*viewInternal]time.Time + measures map[string]*measureRef + views map[string]*viewInternal + viewStartTimes map[*viewInternal]time.Time timer *time.Ticker c chan command @@ -199,11 +200,21 @@ func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { defaultWorker.Record(tags, ms, attachments) } +func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { + defaultWorker.recordMeasurement(tags, ms, attachments) +} + // Record records a set of measurements ms associated with the given tags and attachments. func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + w.recordMeasurement(tags, ms.([]stats.Measurement), attachments) +} + +// recordMeasurement records a set of measurements ms associated with the given tags and attachments. 
+// This is the same as Record but without an interface{} type to avoid allocations +func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { req := &recordReq{ tm: tags, - ms: ms.([]stats.Measurement), + ms: ms, attachments: attachments, t: time.Now(), } @@ -221,6 +232,11 @@ func SetReportingPeriod(d time.Duration) { defaultWorker.SetReportingPeriod(d) } +// Stop stops the default worker. +func Stop() { + defaultWorker.Stop() +} + // SetReportingPeriod sets the interval between reporting aggregated views in // the program. If duration is less than or equal to zero, it enables the // default behavior. @@ -244,13 +260,13 @@ func (w *worker) SetReportingPeriod(d time.Duration) { // a single process. func NewMeter() Meter { return &worker{ - measures: make(map[string]*measureRef), - views: make(map[string]*viewInternal), - startTimes: make(map[*viewInternal]time.Time), - timer: time.NewTicker(defaultReportingDuration), - c: make(chan command, 1024), - quit: make(chan bool), - done: make(chan bool), + measures: make(map[string]*measureRef), + views: make(map[string]*viewInternal), + viewStartTimes: make(map[*viewInternal]time.Time), + timer: time.NewTicker(defaultReportingDuration), + c: make(chan command, 1024), + quit: make(chan bool), + done: make(chan bool), exporters: make(map[Exporter]struct{}), } @@ -281,7 +297,7 @@ func (w *worker) start() { case <-w.quit: w.timer.Stop() close(w.c) - w.done <- true + close(w.done) return } } @@ -290,8 +306,11 @@ func (w *worker) start() { func (w *worker) Stop() { prodMgr := metricproducer.GlobalManager() prodMgr.DeleteProducer(w) - - w.quit <- true + select { + case <-w.quit: + default: + close(w.quit) + } <-w.done } @@ -324,7 +343,7 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { return x, nil } w.views[vi.view.Name] = vi - w.startTimes[vi] = time.Now() + w.viewStartTimes[vi] = time.Now() ref := w.getMeasureRef(vi.view.Measure.Name()) ref.views[vi] = struct{}{} return vi, nil @@ -334,7 +353,7 @@ func (w *worker) unregisterView(v *viewInternal) { w.mu.Lock() defer w.mu.Unlock() delete(w.views, v.view.Name) - delete(w.startTimes, v) + delete(w.viewStartTimes, v) if measure := w.measures[v.view.Measure.Name()]; measure != nil { delete(measure.views, v) } @@ -347,7 +366,7 @@ func (w *worker) reportView(v *viewInternal) { rows := v.collectedRows() viewData := &Data{ View: v.view, - Start: w.startTimes[v], + Start: w.viewStartTimes[v], End: time.Now(), Rows: rows, } @@ -371,15 +390,7 @@ func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { return nil } - var startTime time.Time - if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || - v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { - startTime = time.Time{} - } else { - startTime = w.startTimes[v] - } - - return viewToMetric(v, w.r, now, startTime) + return viewToMetric(v, w.r, now) } // Read reads all view data and returns them as metrics. diff --git a/prometheus-to-sd/vendor/go.opencensus.io/tag/profile_19.go b/prometheus-to-sd/vendor/go.opencensus.io/tag/profile_19.go index b34d95e34..8fb17226f 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/tag/profile_19.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/tag/profile_19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
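// Both build-constraint forms below are intentional: the //go:build line is
// the syntax Go 1.17+ reads, while the legacy // +build line keeps pre-1.17
// toolchains working; gofmt maintains the pair automatically.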
+//go:build go1.9 // +build go1.9 package tag diff --git a/prometheus-to-sd/vendor/go.opencensus.io/tag/profile_not19.go b/prometheus-to-sd/vendor/go.opencensus.io/tag/profile_not19.go index 83adbce56..e28cf13cd 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/tag/profile_not19.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/tag/profile_not19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.9 // +build !go1.9 package tag diff --git a/prometheus-to-sd/vendor/go.opencensus.io/trace/basetypes.go b/prometheus-to-sd/vendor/go.opencensus.io/trace/basetypes.go index 0c54492a2..c8e26ed63 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/trace/basetypes.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/trace/basetypes.go @@ -49,6 +49,16 @@ type Attribute struct { value interface{} } +// Key returns the attribute's key +func (a *Attribute) Key() string { + return a.key +} + +// Value returns the attribute's value +func (a *Attribute) Value() interface{} { + return a.value +} + // BoolAttribute returns a bool-valued attribute. func BoolAttribute(key string, value bool) Attribute { return Attribute{key: key, value: value} diff --git a/prometheus-to-sd/vendor/go.opencensus.io/trace/doc.go b/prometheus-to-sd/vendor/go.opencensus.io/trace/doc.go index 04b1ee4f3..7a1616a55 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/trace/doc.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/trace/doc.go @@ -18,24 +18,23 @@ Package trace contains support for OpenCensus distributed tracing. The following assumes a basic familiarity with OpenCensus concepts. See http://opencensus.io - -Exporting Traces +# Exporting Traces To export collected tracing data, register at least one exporter. You can use one of the provided exporters or write your own. - trace.RegisterExporter(exporter) + trace.RegisterExporter(exporter) By default, traces will be sampled relatively rarely. To change the sampling frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler to sample a subset of traces, or use AlwaysSample to collect a trace on every run: - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) Be careful about using trace.AlwaysSample in a production application with significant traffic: a new trace will be started and exported for every request. -Adding Spans to a Trace +# Adding Spans to a Trace A trace consists of a tree of spans. In Go, the current span is carried in a context.Context. @@ -44,8 +43,8 @@ It is common to want to capture all the activity of a function call in a span. F this to work, the function must take a context.Context as a parameter. Add these two lines to the top of the function: - ctx, span := trace.StartSpan(ctx, "example.com/Run") - defer span.End() + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() StartSpan will create a new top-level span if the context doesn't contain another span, otherwise it will create a child span. 
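// The Attribute.Key and Attribute.Value accessors added in basetypes.go above
// expose the unexported fields to exporters; a minimal sketch (hypothetical
// exporter-side code, not part of this patch):
//
//	attrs := []trace.Attribute{
//		trace.StringAttribute("method", "GET"),
//		trace.Int64Attribute("status", 200),
//	}
//	for _, a := range attrs {
//		fmt.Printf("%s=%v\n", a.Key(), a.Value())
//	}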
diff --git a/prometheus-to-sd/vendor/go.opencensus.io/trace/lrumap.go b/prometheus-to-sd/vendor/go.opencensus.io/trace/lrumap.go index 908c2497e..80095a5f6 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/trace/lrumap.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/trace/lrumap.go @@ -44,7 +44,7 @@ func (lm lruMap) len() int { } func (lm lruMap) keys() []interface{} { - keys := make([]interface{}, len(lm.cacheKeys)) + keys := make([]interface{}, 0, len(lm.cacheKeys)) for k := range lm.cacheKeys { keys = append(keys, k) } diff --git a/prometheus-to-sd/vendor/go.opencensus.io/trace/spanstore.go b/prometheus-to-sd/vendor/go.opencensus.io/trace/spanstore.go index c442d9902..e601f76f2 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/trace/spanstore.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/trace/spanstore.go @@ -48,8 +48,10 @@ func (i internalOnly) ReportActiveSpans(name string) []*SpanData { var out []*SpanData s.mu.Lock() defer s.mu.Unlock() - for span := range s.active { - out = append(out, span.makeSpanData()) + for activeSpan := range s.active { + if s, ok := activeSpan.(*span); ok { + out = append(out, s.makeSpanData()) + } } return out } @@ -185,7 +187,7 @@ func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency t // bucketed by latency. type spanStore struct { mu sync.Mutex // protects everything below. - active map[*Span]struct{} + active map[SpanInterface]struct{} errors map[int32]*bucket latency []bucket maxSpansPerErrorBucket int @@ -194,7 +196,7 @@ type spanStore struct { // newSpanStore creates a span store. func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { s := &spanStore{ - active: make(map[*Span]struct{}), + active: make(map[SpanInterface]struct{}), latency: make([]bucket, len(defaultLatencies)+1), maxSpansPerErrorBucket: errorBucketSize, } @@ -271,7 +273,7 @@ func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { } // add adds a span to the active bucket of the spanStore. -func (s *spanStore) add(span *Span) { +func (s *spanStore) add(span SpanInterface) { s.mu.Lock() s.active[span] = struct{}{} s.mu.Unlock() @@ -279,7 +281,7 @@ func (s *spanStore) add(span *Span) { // finished removes a span from the active set, and adds a corresponding // SpanData to a latency or error bucket. -func (s *spanStore) finished(span *Span, sd *SpanData) { +func (s *spanStore) finished(span SpanInterface, sd *SpanData) { latency := sd.EndTime.Sub(sd.StartTime) if latency < 0 { latency = 0 diff --git a/prometheus-to-sd/vendor/go.opencensus.io/trace/trace.go b/prometheus-to-sd/vendor/go.opencensus.io/trace/trace.go index 125e2cd90..861df9d39 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/trace/trace.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/trace/trace.go @@ -28,12 +28,16 @@ import ( "go.opencensus.io/trace/tracestate" ) +type tracer struct{} + +var _ Tracer = &tracer{} + // Span represents a span of a trace. It has an associated SpanContext, and // stores data accumulated while the span is active. // // Ideally users should interact with Spans by calling the functions in this // package that take a Context parameter. -type Span struct { +type span struct { // data contains information recorded about the span. // // It will be non-nil if we are exporting the span or recording events for it. @@ -66,7 +70,7 @@ type Span struct { // IsRecordingEvents returns true if events are being recorded for this span. 
// Use this check to avoid computing expensive annotations when they will never // be used. -func (s *Span) IsRecordingEvents() bool { +func (s *span) IsRecordingEvents() bool { if s == nil { return false } @@ -109,13 +113,13 @@ type SpanContext struct { type contextKey struct{} // FromContext returns the Span stored in a context, or nil if there isn't one. -func FromContext(ctx context.Context) *Span { +func (t *tracer) FromContext(ctx context.Context) *Span { s, _ := ctx.Value(contextKey{}).(*Span) return s } // NewContext returns a new context with the given Span attached. -func NewContext(parent context.Context, s *Span) context.Context { +func (t *tracer) NewContext(parent context.Context, s *Span) context.Context { return context.WithValue(parent, contextKey{}, s) } @@ -166,12 +170,14 @@ func WithSampler(sampler Sampler) StartOption { // // Returned context contains the newly created span. You can use it to // propagate the returned span in process. -func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { +func (t *tracer) StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { var opts StartOptions var parent SpanContext - if p := FromContext(ctx); p != nil { - p.addChild() - parent = p.spanContext + if p := t.FromContext(ctx); p != nil { + if ps, ok := p.internal.(*span); ok { + ps.addChild() + } + parent = p.SpanContext() } for _, op := range o { op(&opts) @@ -180,7 +186,8 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end - return NewContext(ctx, span), span + extSpan := NewSpan(span) + return t.NewContext(ctx, extSpan), extSpan } // StartSpanWithRemoteParent starts a new child span of the span from the given parent. @@ -190,7 +197,7 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont // // Returned context contains the newly created span. You can use it to // propagate the returned span in process. 
-func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { +func (t *tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { var opts StartOptions for _, op := range o { op(&opts) @@ -198,19 +205,24 @@ func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanCont span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end - return NewContext(ctx, span), span + extSpan := NewSpan(span) + return t.NewContext(ctx, extSpan), extSpan } -func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { - span := &Span{} - span.spanContext = parent +func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *span { + s := &span{} + s.spanContext = parent cfg := config.Load().(*Config) + if gen, ok := cfg.IDGenerator.(*defaultIDGenerator); ok { + // lazy initialization + gen.init() + } if !hasParent { - span.spanContext.TraceID = cfg.IDGenerator.NewTraceID() + s.spanContext.TraceID = cfg.IDGenerator.NewTraceID() } - span.spanContext.SpanID = cfg.IDGenerator.NewSpanID() + s.spanContext.SpanID = cfg.IDGenerator.NewSpanID() sampler := cfg.DefaultSampler if !hasParent || remoteParent || o.Sampler != nil { @@ -222,47 +234,47 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa if o.Sampler != nil { sampler = o.Sampler } - span.spanContext.setIsSampled(sampler(SamplingParameters{ + s.spanContext.setIsSampled(sampler(SamplingParameters{ ParentContext: parent, - TraceID: span.spanContext.TraceID, - SpanID: span.spanContext.SpanID, + TraceID: s.spanContext.TraceID, + SpanID: s.spanContext.SpanID, Name: name, HasRemoteParent: remoteParent}).Sample) } - if !internal.LocalSpanStoreEnabled && !span.spanContext.IsSampled() { - return span + if !internal.LocalSpanStoreEnabled && !s.spanContext.IsSampled() { + return s } - span.data = &SpanData{ - SpanContext: span.spanContext, + s.data = &SpanData{ + SpanContext: s.spanContext, StartTime: time.Now(), SpanKind: o.SpanKind, Name: name, HasRemoteParent: remoteParent, } - span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) - span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) - span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) - span.links = newEvictedQueue(cfg.MaxLinksPerSpan) + s.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) + s.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) + s.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) + s.links = newEvictedQueue(cfg.MaxLinksPerSpan) if hasParent { - span.data.ParentSpanID = parent.SpanID + s.data.ParentSpanID = parent.SpanID } if internal.LocalSpanStoreEnabled { var ss *spanStore ss = spanStoreForNameCreateIfNew(name) if ss != nil { - span.spanStore = ss - ss.add(span) + s.spanStore = ss + ss.add(s) } } - return span + return s } // End ends the span. -func (s *Span) End() { +func (s *span) End() { if s == nil { return } @@ -292,7 +304,7 @@ func (s *Span) End() { // makeSpanData produces a SpanData representing the current state of the Span. // It requires that s.data is non-nil. 
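// Typical use of StartSpanWithRemoteParent, sketched with a hypothetical
// binary-propagated parent (wireBytes and handle are assumptions here;
// propagation is go.opencensus.io/trace/propagation, and the ochttp plugin
// shows a real HTTP propagation path):
//
//	if sc, ok := propagation.FromBinary(wireBytes); ok {
//		ctx, span := trace.StartSpanWithRemoteParent(ctx, "example.com/Handle", sc)
//		defer span.End()
//		handle(ctx) // hypothetical request handler
//	}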
-func (s *Span) makeSpanData() *SpanData { +func (s *span) makeSpanData() *SpanData { var sd SpanData s.mu.Lock() sd = *s.data @@ -317,7 +329,7 @@ func (s *Span) makeSpanData() *SpanData { } // SpanContext returns the SpanContext of the span. -func (s *Span) SpanContext() SpanContext { +func (s *span) SpanContext() SpanContext { if s == nil { return SpanContext{} } @@ -325,7 +337,7 @@ func (s *Span) SpanContext() SpanContext { } // SetName sets the name of the span, if it is recording events. -func (s *Span) SetName(name string) { +func (s *span) SetName(name string) { if !s.IsRecordingEvents() { return } @@ -335,7 +347,7 @@ func (s *Span) SetName(name string) { } // SetStatus sets the status of the span, if it is recording events. -func (s *Span) SetStatus(status Status) { +func (s *span) SetStatus(status Status) { if !s.IsRecordingEvents() { return } @@ -344,7 +356,7 @@ func (s *Span) SetStatus(status Status) { s.mu.Unlock() } -func (s *Span) interfaceArrayToLinksArray() []Link { +func (s *span) interfaceArrayToLinksArray() []Link { linksArr := make([]Link, 0, len(s.links.queue)) for _, value := range s.links.queue { linksArr = append(linksArr, value.(Link)) @@ -352,7 +364,7 @@ func (s *Span) interfaceArrayToLinksArray() []Link { return linksArr } -func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { +func (s *span) interfaceArrayToMessageEventArray() []MessageEvent { messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue)) for _, value := range s.messageEvents.queue { messageEventArr = append(messageEventArr, value.(MessageEvent)) @@ -360,7 +372,7 @@ func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { return messageEventArr } -func (s *Span) interfaceArrayToAnnotationArray() []Annotation { +func (s *span) interfaceArrayToAnnotationArray() []Annotation { annotationArr := make([]Annotation, 0, len(s.annotations.queue)) for _, value := range s.annotations.queue { annotationArr = append(annotationArr, value.(Annotation)) @@ -368,7 +380,7 @@ func (s *Span) interfaceArrayToAnnotationArray() []Annotation { return annotationArr } -func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { +func (s *span) lruAttributesToAttributeMap() map[string]interface{} { attributes := make(map[string]interface{}, s.lruAttributes.len()) for _, key := range s.lruAttributes.keys() { value, ok := s.lruAttributes.get(key) @@ -380,13 +392,13 @@ func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { return attributes } -func (s *Span) copyToCappedAttributes(attributes []Attribute) { +func (s *span) copyToCappedAttributes(attributes []Attribute) { for _, a := range attributes { s.lruAttributes.add(a.key, a.value) } } -func (s *Span) addChild() { +func (s *span) addChild() { if !s.IsRecordingEvents() { return } @@ -398,7 +410,7 @@ func (s *Span) addChild() { // AddAttributes sets attributes in the span. // // Existing attributes whose keys appear in the attributes parameter are overwritten. -func (s *Span) AddAttributes(attributes ...Attribute) { +func (s *span) AddAttributes(attributes ...Attribute) { if !s.IsRecordingEvents() { return } @@ -407,49 +419,27 @@ func (s *Span) AddAttributes(attributes ...Attribute) { s.mu.Unlock() } -// copyAttributes copies a slice of Attributes into a map. 
-func copyAttributes(m map[string]interface{}, attributes []Attribute) { - for _, a := range attributes { - m[a.key] = a.value - } -} - -func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...interface{}) { +func (s *span) printStringInternal(attributes []Attribute, str string) { now := time.Now() - msg := fmt.Sprintf(format, a...) - var m map[string]interface{} - s.mu.Lock() + var am map[string]interface{} if len(attributes) != 0 { - m = make(map[string]interface{}, len(attributes)) - copyAttributes(m, attributes) + am = make(map[string]interface{}, len(attributes)) + for _, attr := range attributes { + am[attr.key] = attr.value + } } - s.annotations.add(Annotation{ - Time: now, - Message: msg, - Attributes: m, - }) - s.mu.Unlock() -} - -func (s *Span) printStringInternal(attributes []Attribute, str string) { - now := time.Now() - var a map[string]interface{} s.mu.Lock() - if len(attributes) != 0 { - a = make(map[string]interface{}, len(attributes)) - copyAttributes(a, attributes) - } s.annotations.add(Annotation{ Time: now, Message: str, - Attributes: a, + Attributes: am, }) s.mu.Unlock() } // Annotate adds an annotation with attributes. // Attributes can be nil. -func (s *Span) Annotate(attributes []Attribute, str string) { +func (s *span) Annotate(attributes []Attribute, str string) { if !s.IsRecordingEvents() { return } @@ -457,11 +447,11 @@ func (s *Span) Annotate(attributes []Attribute, str string) { } // Annotatef adds an annotation with attributes. -func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { +func (s *span) Annotatef(attributes []Attribute, format string, a ...interface{}) { if !s.IsRecordingEvents() { return } - s.lazyPrintfInternal(attributes, format, a...) + s.printStringInternal(attributes, fmt.Sprintf(format, a...)) } // AddMessageSendEvent adds a message send event to the span. @@ -470,7 +460,7 @@ func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{} // unique in this span and the same between the send event and the receive // event (this allows to identify a message between the sender and receiver). // For example, this could be a sequence id. -func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { +func (s *span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { if !s.IsRecordingEvents() { return } @@ -492,7 +482,7 @@ func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedBy // unique in this span and the same between the send event and the receive // event (this allows to identify a message between the sender and receiver). // For example, this could be a sequence id. -func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { +func (s *span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { if !s.IsRecordingEvents() { return } @@ -509,7 +499,7 @@ func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compresse } // AddLink adds a link to the span. -func (s *Span) AddLink(l Link) { +func (s *span) AddLink(l Link) { if !s.IsRecordingEvents() { return } @@ -518,7 +508,7 @@ func (s *Span) AddLink(l Link) { s.mu.Unlock() } -func (s *Span) String() string { +func (s *span) String() string { if s == nil { return "" } @@ -534,20 +524,9 @@ func (s *Span) String() string { var config atomic.Value // access atomically func init() { - gen := &defaultIDGenerator{} - // initialize traceID and spanID generators. 
- var rngSeed int64 - for _, p := range []interface{}{ - &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, - } { - binary.Read(crand.Reader, binary.LittleEndian, p) - } - gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) - gen.spanIDInc |= 1 - config.Store(&Config{ DefaultSampler: ProbabilitySampler(defaultSamplingProbability), - IDGenerator: gen, + IDGenerator: &defaultIDGenerator{}, MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, @@ -571,6 +550,24 @@ type defaultIDGenerator struct { traceIDAdd [2]uint64 traceIDRand *rand.Rand + + initOnce sync.Once +} + +// init initializes the generator on the first call to avoid consuming entropy +// unnecessarily. +func (gen *defaultIDGenerator) init() { + gen.initOnce.Do(func() { + // initialize traceID and spanID generators. + var rngSeed int64 + for _, p := range []interface{}{ + &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, + } { + binary.Read(crand.Reader, binary.LittleEndian, p) + } + gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) + gen.spanIDInc |= 1 + }) } // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. diff --git a/prometheus-to-sd/vendor/go.opencensus.io/trace/trace_api.go b/prometheus-to-sd/vendor/go.opencensus.io/trace/trace_api.go new file mode 100644 index 000000000..9e2c3a999 --- /dev/null +++ b/prometheus-to-sd/vendor/go.opencensus.io/trace/trace_api.go @@ -0,0 +1,265 @@ +// Copyright 2020, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" +) + +// DefaultTracer is the tracer used when package-level exported functions are invoked. +var DefaultTracer Tracer = &tracer{} + +// Tracer can start spans and access context functions. +type Tracer interface { + + // StartSpan starts a new child span of the current span in the context. If + // there is no span in the context, creates a new trace and span. + // + // Returned context contains the newly created span. You can use it to + // propagate the returned span in process. + StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) + + // StartSpanWithRemoteParent starts a new child span of the span from the given parent. + // + // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is + // preferred for cases where the parent is propagated via an incoming request. + // + // Returned context contains the newly created span. You can use it to + // propagate the returned span in process. + StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) + + // FromContext returns the Span stored in a context, or nil if there isn't one. + FromContext(ctx context.Context) *Span + + // NewContext returns a new context with the given Span attached. 
+ NewContext(parent context.Context, s *Span) context.Context +} + +// StartSpan starts a new child span of the current span in the context. If +// there is no span in the context, creates a new trace and span. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { + return DefaultTracer.StartSpan(ctx, name, o...) +} + +// StartSpanWithRemoteParent starts a new child span of the span from the given parent. +// +// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is +// preferred for cases where the parent is propagated via an incoming request. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { + return DefaultTracer.StartSpanWithRemoteParent(ctx, name, parent, o...) +} + +// FromContext returns the Span stored in a context, or a Span that is not +// recording events if there isn't one. +func FromContext(ctx context.Context) *Span { + return DefaultTracer.FromContext(ctx) +} + +// NewContext returns a new context with the given Span attached. +func NewContext(parent context.Context, s *Span) context.Context { + return DefaultTracer.NewContext(parent, s) +} + +// SpanInterface represents a span of a trace. It has an associated SpanContext, and +// stores data accumulated while the span is active. +// +// Ideally users should interact with Spans by calling the functions in this +// package that take a Context parameter. +type SpanInterface interface { + + // IsRecordingEvents returns true if events are being recorded for this span. + // Use this check to avoid computing expensive annotations when they will never + // be used. + IsRecordingEvents() bool + + // End ends the span. + End() + + // SpanContext returns the SpanContext of the span. + SpanContext() SpanContext + + // SetName sets the name of the span, if it is recording events. + SetName(name string) + + // SetStatus sets the status of the span, if it is recording events. + SetStatus(status Status) + + // AddAttributes sets attributes in the span. + // + // Existing attributes whose keys appear in the attributes parameter are overwritten. + AddAttributes(attributes ...Attribute) + + // Annotate adds an annotation with attributes. + // Attributes can be nil. + Annotate(attributes []Attribute, str string) + + // Annotatef adds an annotation with attributes. + Annotatef(attributes []Attribute, format string, a ...interface{}) + + // AddMessageSendEvent adds a message send event to the span. + // + // messageID is an identifier for the message, which is recommended to be + // unique in this span and the same between the send event and the receive + // event (this allows to identify a message between the sender and receiver). + // For example, this could be a sequence id. + AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) + + // AddMessageReceiveEvent adds a message receive event to the span. + // + // messageID is an identifier for the message, which is recommended to be + // unique in this span and the same between the send event and the receive + // event (this allows to identify a message between the sender and receiver). + // For example, this could be a sequence id. 
+ AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) + + // AddLink adds a link to the span. + AddLink(l Link) + + // String prints a string representation of a span. + String() string +} + +// NewSpan is a convenience function for creating a *Span out of a *span +func NewSpan(s SpanInterface) *Span { + return &Span{internal: s} +} + +// Span is a struct wrapper around the SpanInt interface, which allows correctly handling +// nil spans, while also allowing the SpanInterface implementation to be swapped out. +type Span struct { + internal SpanInterface +} + +// Internal returns the underlying implementation of the Span +func (s *Span) Internal() SpanInterface { + return s.internal +} + +// IsRecordingEvents returns true if events are being recorded for this span. +// Use this check to avoid computing expensive annotations when they will never +// be used. +func (s *Span) IsRecordingEvents() bool { + if s == nil { + return false + } + return s.internal.IsRecordingEvents() +} + +// End ends the span. +func (s *Span) End() { + if s == nil { + return + } + s.internal.End() +} + +// SpanContext returns the SpanContext of the span. +func (s *Span) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + return s.internal.SpanContext() +} + +// SetName sets the name of the span, if it is recording events. +func (s *Span) SetName(name string) { + if !s.IsRecordingEvents() { + return + } + s.internal.SetName(name) +} + +// SetStatus sets the status of the span, if it is recording events. +func (s *Span) SetStatus(status Status) { + if !s.IsRecordingEvents() { + return + } + s.internal.SetStatus(status) +} + +// AddAttributes sets attributes in the span. +// +// Existing attributes whose keys appear in the attributes parameter are overwritten. +func (s *Span) AddAttributes(attributes ...Attribute) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddAttributes(attributes...) +} + +// Annotate adds an annotation with attributes. +// Attributes can be nil. +func (s *Span) Annotate(attributes []Attribute, str string) { + if !s.IsRecordingEvents() { + return + } + s.internal.Annotate(attributes, str) +} + +// Annotatef adds an annotation with attributes. +func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { + if !s.IsRecordingEvents() { + return + } + s.internal.Annotatef(attributes, format, a...) +} + +// AddMessageSendEvent adds a message send event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize) +} + +// AddMessageReceiveEvent adds a message receive event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. 
+func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize) +} + +// AddLink adds a link to the span. +func (s *Span) AddLink(l Link) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddLink(l) +} + +// String prints a string representation of a span. +func (s *Span) String() string { + if s == nil { + return "" + } + return s.internal.String() +} diff --git a/prometheus-to-sd/vendor/go.opencensus.io/trace/trace_go11.go b/prometheus-to-sd/vendor/go.opencensus.io/trace/trace_go11.go index b7d8aaf28..b8fc1e495 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/trace/trace_go11.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/trace/trace_go11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.11 // +build go1.11 package trace diff --git a/prometheus-to-sd/vendor/go.opencensus.io/trace/trace_nongo11.go b/prometheus-to-sd/vendor/go.opencensus.io/trace/trace_nongo11.go index e25419859..da488fc87 100644 --- a/prometheus-to-sd/vendor/go.opencensus.io/trace/trace_nongo11.go +++ b/prometheus-to-sd/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.11 // +build !go1.11 package trace diff --git a/prometheus-to-sd/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/prometheus-to-sd/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go deleted file mode 100644 index 37dc0cfdb..000000000 --- a/prometheus-to-sd/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ctxhttp provides helper functions for performing context-aware HTTP requests. -package ctxhttp // import "golang.org/x/net/context/ctxhttp" - -import ( - "context" - "io" - "net/http" - "net/url" - "strings" -) - -// Do sends an HTTP request with the provided http.Client and returns -// an HTTP response. -// -// If the client is nil, http.DefaultClient is used. -// -// The provided ctx must be non-nil. If it is canceled or times out, -// ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - resp, err := client.Do(req.WithContext(ctx)) - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err != nil { - select { - case <-ctx.Done(): - err = ctx.Err() - default: - } - } - return resp, err -} - -// Get issues a GET request via the Do function. -func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. 
-func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. -func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/prometheus-to-sd/vendor/golang.org/x/net/http2/pipe.go b/prometheus-to-sd/vendor/golang.org/x/net/http2/pipe.go index c15b8a771..684d984fd 100644 --- a/prometheus-to-sd/vendor/golang.org/x/net/http2/pipe.go +++ b/prometheus-to-sd/vendor/golang.org/x/net/http2/pipe.go @@ -88,13 +88,9 @@ func (p *pipe) Write(d []byte) (n int, err error) { p.c.L = &p.mu } defer p.c.Signal() - if p.err != nil { + if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } - if p.breakErr != nil { - p.unread += len(d) - return len(d), nil // discard when there is no reader - } return p.b.Write(d) } diff --git a/prometheus-to-sd/vendor/golang.org/x/net/http2/server.go b/prometheus-to-sd/vendor/golang.org/x/net/http2/server.go index 8cb14f3c9..cd057f398 100644 --- a/prometheus-to-sd/vendor/golang.org/x/net/http2/server.go +++ b/prometheus-to-sd/vendor/golang.org/x/net/http2/server.go @@ -1822,15 +1822,18 @@ func (sc *serverConn) processData(f *DataFrame) error { } if len(data) > 0 { + st.bodyBytes += int64(len(data)) wrote, err := st.body.Write(data) if err != nil { + // The handler has closed the request body. + // Return the connection-level flow control for the discarded data, + // but not the stream-level flow control. sc.sendWindowUpdate(nil, int(f.Length)-wrote) - return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed)) + return nil } if wrote != len(data) { panic("internal error: bad Writer") } - st.bodyBytes += int64(len(data)) } // Return any padded flow control now, since we won't diff --git a/prometheus-to-sd/vendor/golang.org/x/net/http2/transport.go b/prometheus-to-sd/vendor/golang.org/x/net/http2/transport.go index 05ba23d3d..f965579f7 100644 --- a/prometheus-to-sd/vendor/golang.org/x/net/http2/transport.go +++ b/prometheus-to-sd/vendor/golang.org/x/net/http2/transport.go @@ -560,10 +560,11 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { + roundTripErr := err if req, err = shouldRetryRequest(req, err); err == nil { // After the first retry, do exponential backoff with 10% jitter. 
if retry == 0 { - t.vlogf("RoundTrip retrying after failure: %v", err) + t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue } backoff := float64(uint(1) << (uint(retry) - 1)) @@ -572,7 +573,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res timer := backoffNewTimer(d) select { case <-timer.C: - t.vlogf("RoundTrip retrying after failure: %v", err) + t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): timer.Stop() @@ -2555,6 +2556,9 @@ func (b transportResponseBody) Close() error { cs := b.cs cc := cs.cc + cs.bufPipe.BreakWithError(errClosedResponseBody) + cs.abortStream(errClosedResponseBody) + unread := cs.bufPipe.Len() if unread > 0 { cc.mu.Lock() @@ -2573,9 +2577,6 @@ func (b transportResponseBody) Close() error { cc.wmu.Unlock() } - cs.bufPipe.BreakWithError(errClosedResponseBody) - cs.abortStream(errClosedResponseBody) - select { case <-cs.donec: case <-cs.ctx.Done(): diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/AUTHORS b/prometheus-to-sd/vendor/golang.org/x/oauth2/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/prometheus-to-sd/vendor/golang.org/x/oauth2/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/CONTRIBUTORS b/prometheus-to-sd/vendor/golang.org/x/oauth2/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/prometheus-to-sd/vendor/golang.org/x/oauth2/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/README.md b/prometheus-to-sd/vendor/golang.org/x/oauth2/README.md index 1473e1296..781770c20 100644 --- a/prometheus-to-sd/vendor/golang.org/x/oauth2/README.md +++ b/prometheus-to-sd/vendor/golang.org/x/oauth2/README.md @@ -19,7 +19,7 @@ See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) * [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google) -## Policy for new packages +## Policy for new endpoints We no longer accept new provider-specific packages in this repo if all they do is add a single endpoint variable. If you just want to add a @@ -29,8 +29,12 @@ package. ## Report Issues / Send Patches -This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. - The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. In particular: + +* Excluding trivial changes, all contributions should be connected to an existing issue. +* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. +* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2). 
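The RoundTripOpt hunk above captures the original failure in roundTripErr so the log no longer reports the (by then overwritten) err, and, per its comment, backs off exponentially with 10% jitter after the first retry. A minimal sketch of that delay schedule, assuming the jitter is applied multiplicatively as the comment describes (the jitter line itself is elided from this hunk):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoffDelay mirrors the schedule in the hunk above: 2^(retry-1)
// seconds, plus up to 10% random jitter (assumed multiplicative; the
// jitter line is not shown in this diff).
func backoffDelay(retry uint) time.Duration {
	backoff := float64(uint(1) << (retry - 1))
	backoff += backoff * (0.1 * rand.Float64())
	return time.Duration(backoff * float64(time.Second))
}

func main() {
	for retry := uint(1); retry <= 6; retry++ {
		fmt.Printf("retry %d: ~%v\n", retry, backoffDelay(retry))
	}
}

Since the surrounding code stops retrying after retry 6, the worst-case single wait is about 32 seconds plus jitter.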
diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/authhandler/authhandler.go b/prometheus-to-sd/vendor/golang.org/x/oauth2/authhandler/authhandler.go index 69967cf87..9bc6cd7bc 100644 --- a/prometheus-to-sd/vendor/golang.org/x/oauth2/authhandler/authhandler.go +++ b/prometheus-to-sd/vendor/golang.org/x/oauth2/authhandler/authhandler.go @@ -13,11 +13,36 @@ import ( "golang.org/x/oauth2" ) +const ( + // Parameter keys for AuthCodeURL method to support PKCE. + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + + // Parameter key for Exchange method to support PKCE. + codeVerifierKey = "code_verifier" +) + +// PKCEParams holds parameters to support PKCE. +type PKCEParams struct { + Challenge string // The unpadded, base64-url-encoded string of the encrypted code verifier. + ChallengeMethod string // The encryption method (ex. S256). + Verifier string // The original, non-encrypted secret. +} + // AuthorizationHandler is a 3-legged-OAuth helper that prompts // the user for OAuth consent at the specified auth code URL // and returns an auth code and state upon approval. type AuthorizationHandler func(authCodeURL string) (code string, state string, err error) +// TokenSourceWithPKCE is an enhanced version of TokenSource with PKCE support. +// +// The pkce parameter supports PKCE flow, which uses code challenge and code verifier +// to prevent CSRF attacks. A unique code challenge and code verifier should be generated +// by the caller at runtime. See https://www.oauth.com/oauth2-servers/pkce/ for more info. +func TokenSourceWithPKCE(ctx context.Context, config *oauth2.Config, state string, authHandler AuthorizationHandler, pkce *PKCEParams) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, authHandlerSource{config: config, ctx: ctx, authHandler: authHandler, state: state, pkce: pkce}) +} + // TokenSource returns an oauth2.TokenSource that fetches access tokens // using 3-legged-OAuth flow. // @@ -33,7 +58,7 @@ type AuthorizationHandler func(authCodeURL string) (code string, state string, e // and response before exchanging the auth code for OAuth token to prevent CSRF // attacks. func TokenSource(ctx context.Context, config *oauth2.Config, state string, authHandler AuthorizationHandler) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, authHandlerSource{config: config, ctx: ctx, authHandler: authHandler, state: state}) + return TokenSourceWithPKCE(ctx, config, state, authHandler, nil) } type authHandlerSource struct { @@ -41,10 +66,17 @@ type authHandlerSource struct { config *oauth2.Config authHandler AuthorizationHandler state string + pkce *PKCEParams } func (source authHandlerSource) Token() (*oauth2.Token, error) { - url := source.config.AuthCodeURL(source.state) + // Step 1: Obtain auth code. + var authCodeUrlOptions []oauth2.AuthCodeOption + if source.pkce != nil && source.pkce.Challenge != "" && source.pkce.ChallengeMethod != "" { + authCodeUrlOptions = []oauth2.AuthCodeOption{oauth2.SetAuthURLParam(codeChallengeKey, source.pkce.Challenge), + oauth2.SetAuthURLParam(codeChallengeMethodKey, source.pkce.ChallengeMethod)} + } + url := source.config.AuthCodeURL(source.state, authCodeUrlOptions...) 
code, state, err := source.authHandler(url) if err != nil { return nil, err @@ -52,5 +84,11 @@ func (source authHandlerSource) Token() (*oauth2.Token, error) { if state != source.state { return nil, errors.New("state mismatch in 3-legged-OAuth flow") } - return source.config.Exchange(source.ctx, code) + + // Step 2: Exchange auth code for access token. + var exchangeOptions []oauth2.AuthCodeOption + if source.pkce != nil && source.pkce.Verifier != "" { + exchangeOptions = []oauth2.AuthCodeOption{oauth2.SetAuthURLParam(codeVerifierKey, source.pkce.Verifier)} + } + return source.config.Exchange(source.ctx, code, exchangeOptions...) } diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/default.go b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/default.go index 880dd7b59..b3e8783cc 100644 --- a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/default.go +++ b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/default.go @@ -13,12 +13,15 @@ import ( "os" "path/filepath" "runtime" + "time" "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/authhandler" ) +const adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" + // Credentials holds Google credentials, including "Application Default Credentials". // For more details, see: // https://developers.google.com/accounts/docs/application-default-credentials @@ -54,11 +57,26 @@ type CredentialsParams struct { // Optional. Subject string - // AuthHandler is the AuthorizationHandler used for 3-legged OAuth flow. Optional. + // AuthHandler is the AuthorizationHandler used for 3-legged OAuth flow. Required for 3LO flow. AuthHandler authhandler.AuthorizationHandler - // State is a unique string used with AuthHandler. Optional. + // State is a unique string used with AuthHandler. Required for 3LO flow. State string + + // PKCE is used to support PKCE flow. Optional for 3LO flow. + PKCE *authhandler.PKCEParams + + // The OAuth2 TokenURL default override. This value overrides the default TokenURL, + // unless explicitly specified by the credentials config file. Optional. + TokenURL string + + // EarlyTokenRefresh is the amount of time before a token expires that a new + // token will be preemptively fetched. If unset the default value is 10 + // seconds. + // + // Note: This option is currently only respected when using credentials + // fetched from the GCE metadata server. + EarlyTokenRefresh time.Duration } func (params CredentialsParams) deepCopy() CredentialsParams { @@ -94,20 +112,20 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc // It looks for credentials in the following places, // preferring the first location found: // -// 1. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -// For workload identity federation, refer to -// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on -// how to generate the JSON configuration file for on-prem/non-Google cloud -// platforms. -// 2. A JSON file in a location known to the gcloud command-line tool. -// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. -// On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses -// the appengine.AccessToken function. -// 4. 
On Google Compute Engine, Google App Engine standard second generation runtimes -// (>= Go 1.11), and Google App Engine flexible environment, it fetches -// credentials from the metadata server. +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// For workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on +// how to generate the JSON configuration file for on-prem/non-Google cloud +// platforms. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses +// the appengine.AccessToken function. +// 4. On Google Compute Engine, Google App Engine standard second generation runtimes +// (>= Go 1.11), and Google App Engine flexible environment, it fetches +// credentials from the metadata server. func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsParams) (*Credentials, error) { // Make defensive copy of the slices in params. params = params.deepCopy() @@ -134,7 +152,7 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) // and App Engine flexible use ComputeTokenSource and the metadata server. if appengineTokenFunc != nil { - return &DefaultCredentials{ + return &Credentials{ ProjectID: appengineAppIDFunc(ctx), TokenSource: AppEngineTokenSource(ctx, params.Scopes...), }, nil @@ -144,15 +162,14 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar // or App Engine flexible, use the metadata server. if metadata.OnGCE() { id, _ := metadata.ProjectID() - return &DefaultCredentials{ + return &Credentials{ ProjectID: id, - TokenSource: ComputeTokenSource("", params.Scopes...), + TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), }, nil } // None are found; return helpful error. - const url = "https://developers.google.com/accounts/docs/application-default-credentials" - return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information", adcSetupURL) } // FindDefaultCredentials invokes FindDefaultCredentialsWithParams with the specified scopes. 
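Callers never see the lookup order spelled out above; they simply ask for default credentials and get whichever source matched first. A minimal usage sketch against the public API (the scope string is only an example):

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	// Walks the ADC lookup order documented above: the
	// GOOGLE_APPLICATION_CREDENTIALS file, the gcloud well-known file,
	// App Engine, and finally the GCE metadata server.
	creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatalf("no default credentials: %v", err) // error text now points at adcSetupURL
	}
	tok, err := creds.TokenSource.Token()
	if err != nil {
		log.Fatalf("token fetch: %v", err)
	}
	log.Printf("project %q, token expires %v", creds.ProjectID, tok.Expiry)
}

On GCE the returned source now goes through computeTokenSource, so the new EarlyTokenRefresh field (set via CredentialsParams and FindDefaultCredentialsWithParams) controls how far before expiry a fresh token is fetched.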
@@ -176,7 +193,7 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params if config != nil { return &Credentials{ ProjectID: "", - TokenSource: authhandler.TokenSource(ctx, config, params.State, params.AuthHandler), + TokenSource: authhandler.TokenSourceWithPKCE(ctx, config, params.State, params.AuthHandler, params.PKCE), JSON: jsonData, }, nil } @@ -190,7 +207,8 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params if err != nil { return nil, err } - return &DefaultCredentials{ + ts = newErrWrappingTokenSource(ts) + return &Credentials{ ProjectID: f.ProjectID, TokenSource: ts, JSON: jsonData, @@ -212,7 +230,7 @@ func wellKnownFile() string { return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) } -func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*DefaultCredentials, error) { +func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*Credentials, error) { b, err := ioutil.ReadFile(filename) if err != nil { return nil, err diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/doc.go b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/doc.go index 8e6a57ce9..ca717634a 100644 --- a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/doc.go +++ b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/doc.go @@ -15,18 +15,18 @@ // For more information on using workload identity federation, refer to // https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. // -// OAuth2 Configs +// # OAuth2 Configs // // Two functions in this package return golang.org/x/oauth2.Config values from Google credential // data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, // the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or // create an http.Client. // -// Workload Identity Federation +// # Workload Identity Federation // // Using workload identity federation, your application can access Google Cloud // resources from Amazon Web Services (AWS), Microsoft Azure or any identity -// provider that supports OpenID Connect (OIDC). +// provider that supports OpenID Connect (OIDC) or SAML 2.0. // Traditionally, applications running outside Google Cloud have used service // account keys to access Google Cloud resources. Using identity federation, // you can allow your workload to impersonate a service account. @@ -36,23 +36,77 @@ // Follow the detailed instructions on how to configure Workload Identity Federation // in various platforms: // -// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws -// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure -// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc +// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws +// Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure +// OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc +// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml // -// For OIDC providers, the library can retrieve OIDC tokens either from a -// local file location (file-sourced credentials) or from a local server -// (URL-sourced credentials). 
+// For OIDC and SAML providers, the library can retrieve tokens in three ways: +// from a local file location (file-sourced credentials), from a server +// (URL-sourced credentials), or from a local executable (executable-sourced +// credentials). // For file-sourced credentials, a background process needs to be continuously -// refreshing the file location with a new OIDC token prior to expiration. +// refreshing the file location with a new OIDC/SAML token prior to expiration. // For tokens with one hour lifetimes, the token needs to be updated in the file // every hour. The token can be stored directly as plain text or in JSON format. // For URL-sourced credentials, a local server needs to host a GET endpoint to -// return the OIDC token. The response can be in plain text or JSON. +// return the OIDC/SAML token. The response can be in plain text or JSON. // Additional required request headers can also be specified. +// For executable-sourced credentials, an application needs to be available to +// output the OIDC/SAML token and other information in a JSON format. +// For more information on how these work (and how to implement +// executable-sourced credentials), please check out: +// https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration // +// Note that this library does not perform any validation on the token_url, token_info_url, +// or service_account_impersonation_url fields of the credential configuration. +// It is not recommended to use a credential configuration that you did not generate with +// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. // -// Credentials +// # Workforce Identity Federation +// +// Workforce identity federation lets you use an external identity provider (IdP) to +// authenticate and authorize a workforce—a group of users, such as employees, partners, +// and contractors—using IAM, so that the users can access Google Cloud services. +// Workforce identity federation extends Google Cloud's identity capabilities to support +// syncless, attribute-based single sign on. +// +// With workforce identity federation, your workforce can access Google Cloud resources +// using an external identity provider (IdP) that supports OpenID Connect (OIDC) or +// SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation +// Services (AD FS), Okta, and others. +// +// Follow the detailed instructions on how to configure Workload Identity Federation +// in various platforms: +// +// Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad +// Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta +// OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc +// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml +// +// For workforce identity federation, the library can retrieve tokens in three ways: +// from a local file location (file-sourced credentials), from a server +// (URL-sourced credentials), or from a local executable (executable-sourced +// credentials). +// For file-sourced credentials, a background process needs to be continuously +// refreshing the file location with a new OIDC/SAML token prior to expiration. +// For tokens with one hour lifetimes, the token needs to be updated in the file +// every hour. The token can be stored directly as plain text or in JSON format. 
+// For URL-sourced credentials, a local server needs to host a GET endpoint to +// return the OIDC/SAML token. The response can be in plain text or JSON. +// Additional required request headers can also be specified. +// For executable-sourced credentials, an application needs to be available to +// output the OIDC/SAML token and other information in a JSON format. +// For more information on how these work (and how to implement +// executable-sourced credentials), please check out: +// https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in +// +// Note that this library does not perform any validation on the token_url, token_info_url, +// or service_account_impersonation_url fields of the credential configuration. +// It is not recommended to use a credential configuration that you did not generate with +// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain. +// +// # Credentials // // The Credentials type represents Google credentials, including Application Default // Credentials. diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/error.go b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/error.go new file mode 100644 index 000000000..d84dd0047 --- /dev/null +++ b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/error.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "errors" + + "golang.org/x/oauth2" +) + +// AuthenticationError indicates there was an error in the authentication flow. +// +// Use (*AuthenticationError).Temporary to check if the error can be retried. +type AuthenticationError struct { + err *oauth2.RetrieveError +} + +func newAuthenticationError(err error) error { + re := &oauth2.RetrieveError{} + if !errors.As(err, &re) { + return err + } + return &AuthenticationError{ + err: re, + } +} + +// Temporary indicates that the network error has one of the following status codes and may be retried: 500, 503, 408, or 429. +func (e *AuthenticationError) Temporary() bool { + if e.err.Response == nil { + return false + } + sc := e.err.Response.StatusCode + return sc == 500 || sc == 503 || sc == 408 || sc == 429 +} + +func (e *AuthenticationError) Error() string { + return e.err.Error() +} + +func (e *AuthenticationError) Unwrap() error { + return e.err +} + +type errWrappingTokenSource struct { + src oauth2.TokenSource +} + +func newErrWrappingTokenSource(ts oauth2.TokenSource) oauth2.TokenSource { + return &errWrappingTokenSource{src: ts} +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. +func (s *errWrappingTokenSource) Token() (*oauth2.Token, error) { + t, err := s.src.Token() + if err != nil { + return nil, newAuthenticationError(err) + } + return t, nil +} diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/google.go b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/google.go index ccc23ee0a..cc1223889 100644 --- a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/google.go +++ b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/google.go @@ -26,6 +26,9 @@ var Endpoint = oauth2.Endpoint{ AuthStyle: oauth2.AuthStyleInParams, } +// MTLSTokenURL is Google's OAuth 2.0 default mTLS endpoint. 
+const MTLSTokenURL = "https://oauth2.mtls.googleapis.com/token" + // JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. const JWTTokenURL = "https://oauth2.googleapis.com/token" @@ -122,6 +125,7 @@ type credentialsFile struct { TokenURLExternal string `json:"token_url"` TokenInfoURL string `json:"token_info_url"` ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + ServiceAccountImpersonation serviceAccountImpersonationInfo `json:"service_account_impersonation"` Delegates []string `json:"delegates"` CredentialSource externalaccount.CredentialSource `json:"credential_source"` QuotaProjectID string `json:"quota_project_id"` @@ -131,6 +135,10 @@ type credentialsFile struct { SourceCredentials *credentialsFile `json:"source_credentials"` } +type serviceAccountImpersonationInfo struct { + TokenLifetimeSeconds int `json:"token_lifetime_seconds"` +} + func (f *credentialsFile) jwtConfig(scopes []string, subject string) *jwt.Config { cfg := &jwt.Config{ Email: f.ClientEmail, @@ -139,6 +147,7 @@ func (f *credentialsFile) jwtConfig(scopes []string, subject string) *jwt.Config Scopes: scopes, TokenURL: f.TokenURL, Subject: subject, // This is the user email to impersonate + Audience: f.Audience, } if cfg.TokenURL == "" { cfg.TokenURL = JWTTokenURL @@ -166,7 +175,11 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar cfg.Endpoint.AuthURL = Endpoint.AuthURL } if cfg.Endpoint.TokenURL == "" { - cfg.Endpoint.TokenURL = Endpoint.TokenURL + if params.TokenURL != "" { + cfg.Endpoint.TokenURL = params.TokenURL + } else { + cfg.Endpoint.TokenURL = Endpoint.TokenURL + } } tok := &oauth2.Token{RefreshToken: f.RefreshToken} return cfg.TokenSource(ctx, tok), nil @@ -177,12 +190,13 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar TokenURL: f.TokenURLExternal, TokenInfoURL: f.TokenInfoURL, ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL, - ClientSecret: f.ClientSecret, - ClientID: f.ClientID, - CredentialSource: f.CredentialSource, - QuotaProjectID: f.QuotaProjectID, - Scopes: params.Scopes, - WorkforcePoolUserProject: f.WorkforcePoolUserProject, + ServiceAccountImpersonationLifetimeSeconds: f.ServiceAccountImpersonation.TokenLifetimeSeconds, + ClientSecret: f.ClientSecret, + ClientID: f.ClientID, + CredentialSource: f.CredentialSource, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, + WorkforcePoolUserProject: f.WorkforcePoolUserProject, } return cfg.TokenSource(ctx) case impersonatedServiceAccount: @@ -217,7 +231,11 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar // Further information about retrieving access tokens from the GCE metadata // server can be found at https://cloud.google.com/compute/docs/authentication. func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope}) + return computeTokenSource(account, 0, scope...) 
+} + +func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource { + return oauth2.ReuseTokenSourceWithExpiry(nil, computeSource{account: account, scopes: scope}, earlyExpiry) } type computeSource struct { diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index a5a5423c6..2bf3202b2 100644 --- a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -52,9 +52,23 @@ const ( // The AWS authorization header name for the security session token if available. awsSecurityTokenHeader = "x-amz-security-token" + // The name of the header containing the session token for metadata endpoint calls + awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token" + + awsIMDSv2SessionTtlHeader = "X-aws-ec2-metadata-token-ttl-seconds" + + awsIMDSv2SessionTtl = "300" + // The AWS authorization header name for the auto-generated date. awsDateHeader = "x-amz-date" + // Supported AWS configuration environment variables. + awsAccessKeyId = "AWS_ACCESS_KEY_ID" + awsDefaultRegion = "AWS_DEFAULT_REGION" + awsRegion = "AWS_REGION" + awsSecretAccessKey = "AWS_SECRET_ACCESS_KEY" + awsSessionToken = "AWS_SESSION_TOKEN" + awsTimeFormatLong = "20060102T150405Z" awsTimeFormatShort = "20060102" ) @@ -241,6 +255,7 @@ type awsCredentialSource struct { RegionURL string RegionalCredVerificationURL string CredVerificationURL string + IMDSv2SessionTokenURL string TargetResource string requestSigner *awsRequestSigner region string @@ -259,6 +274,49 @@ type awsRequest struct { Headers []awsRequestHeader `json:"headers"` } +func (cs awsCredentialSource) validateMetadataServers() error { + if err := cs.validateMetadataServer(cs.RegionURL, "region_url"); err != nil { + return err + } + if err := cs.validateMetadataServer(cs.CredVerificationURL, "url"); err != nil { + return err + } + return cs.validateMetadataServer(cs.IMDSv2SessionTokenURL, "imdsv2_session_token_url") +} + +var validHostnames []string = []string{"169.254.169.254", "fd00:ec2::254"} + +func (cs awsCredentialSource) isValidMetadataServer(metadataUrl string) bool { + if metadataUrl == "" { + // Zero value means use default, which is valid. + return true + } + + u, err := url.Parse(metadataUrl) + if err != nil { + // Unparseable URL means invalid + return false + } + + for _, validHostname := range validHostnames { + if u.Hostname() == validHostname { + // If it's one of the valid hostnames, everything is good + return true + } + } + + // hostname not found in our allowlist, so not valid + return false +} + +func (cs awsCredentialSource) validateMetadataServer(metadataUrl, urlName string) error { + if !cs.isValidMetadataServer(metadataUrl) { + return fmt.Errorf("oauth2/google: invalid hostname %s for %s", metadataUrl, urlName) + } + + return nil +} + func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { if cs.client == nil { cs.client = oauth2.NewClient(cs.ctx, nil) @@ -266,14 +324,41 @@ func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, erro return cs.client.Do(req.WithContext(cs.ctx)) } +func canRetrieveRegionFromEnvironment() bool { + // The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is + // required. 
+ return getenv(awsRegion) != "" || getenv(awsDefaultRegion) != "" +} + +func canRetrieveSecurityCredentialFromEnvironment() bool { + // Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available. + return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != "" +} + +func shouldUseMetadataServer() bool { + return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment() +} + func (cs awsCredentialSource) subjectToken() (string, error) { if cs.requestSigner == nil { - awsSecurityCredentials, err := cs.getSecurityCredentials() + headers := make(map[string]string) + if shouldUseMetadataServer() { + awsSessionToken, err := cs.getAWSSessionToken() + if err != nil { + return "", err + } + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + } + } + + awsSecurityCredentials, err := cs.getSecurityCredentials(headers) if err != nil { return "", err } - if cs.region, err = cs.getRegion(); err != nil { + if cs.region, err = cs.getRegion(headers); err != nil { return "", err } @@ -340,12 +425,42 @@ func (cs awsCredentialSource) subjectToken() (string, error) { return url.QueryEscape(string(result)), nil } -func (cs *awsCredentialSource) getRegion() (string, error) { - if envAwsRegion := getenv("AWS_REGION"); envAwsRegion != "" { - return envAwsRegion, nil +func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { + if cs.IMDSv2SessionTokenURL == "" { + return "", nil + } + + req, err := http.NewRequest("PUT", cs.IMDSv2SessionTokenURL, nil) + if err != nil { + return "", err + } + + req.Header.Add(awsIMDSv2SessionTtlHeader, awsIMDSv2SessionTtl) + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google: unable to retrieve AWS session token - %s", string(respBody)) } - if envAwsRegion := getenv("AWS_DEFAULT_REGION"); envAwsRegion != "" { - return envAwsRegion, nil + + return string(respBody), nil +} + +func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) { + if canRetrieveRegionFromEnvironment() { + if envAwsRegion := getenv(awsRegion); envAwsRegion != "" { + return envAwsRegion, nil + } + return getenv("AWS_DEFAULT_REGION"), nil } if cs.RegionURL == "" { @@ -357,6 +472,10 @@ func (cs *awsCredentialSource) getRegion() (string, error) { return "", err } + for name, value := range headers { + req.Header.Add(name, value) + } + resp, err := cs.doRequest(req) if err != nil { return "", err @@ -381,23 +500,21 @@ func (cs *awsCredentialSource) getRegion() (string, error) { return string(respBody[:respBodyEnd]), nil } -func (cs *awsCredentialSource) getSecurityCredentials() (result awsSecurityCredentials, err error) { - if accessKeyID := getenv("AWS_ACCESS_KEY_ID"); accessKeyID != "" { - if secretAccessKey := getenv("AWS_SECRET_ACCESS_KEY"); secretAccessKey != "" { - return awsSecurityCredentials{ - AccessKeyID: accessKeyID, - SecretAccessKey: secretAccessKey, - SecurityToken: getenv("AWS_SESSION_TOKEN"), - }, nil - } +func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result awsSecurityCredentials, err error) { + if canRetrieveSecurityCredentialFromEnvironment() { + return awsSecurityCredentials{ + AccessKeyID: getenv(awsAccessKeyId), + SecretAccessKey: getenv(awsSecretAccessKey), + SecurityToken: getenv(awsSessionToken), + }, nil } - 
roleName, err := cs.getMetadataRoleName() + roleName, err := cs.getMetadataRoleName(headers) if err != nil { return } - credentials, err := cs.getMetadataSecurityCredentials(roleName) + credentials, err := cs.getMetadataSecurityCredentials(roleName, headers) if err != nil { return } @@ -413,7 +530,7 @@ func (cs *awsCredentialSource) getSecurityCredentials() (result awsSecurityCrede return credentials, nil } -func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) (awsSecurityCredentials, error) { +func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (awsSecurityCredentials, error) { var result awsSecurityCredentials req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.CredVerificationURL, roleName), nil) @@ -422,6 +539,10 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) ( } req.Header.Add("Content-Type", "application/json") + for name, value := range headers { + req.Header.Add(name, value) + } + resp, err := cs.doRequest(req) if err != nil { return result, err @@ -441,7 +562,7 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) ( return result, err } -func (cs *awsCredentialSource) getMetadataRoleName() (string, error) { +func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (string, error) { if cs.CredVerificationURL == "" { return "", errors.New("oauth2/google: unable to determine the AWS metadata server security credentials endpoint") } @@ -451,6 +572,10 @@ func (cs *awsCredentialSource) getMetadataRoleName() (string, error) { return "", err } + for name, value := range headers { + req.Header.Add(name, value) + } + resp, err := cs.doRequest(req) if err != nil { return "", err diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index bc3ce5317..dcd252a61 100644 --- a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -39,6 +39,9 @@ type Config struct { // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only // required for workload identity pools when APIs to be accessed have not integrated with UberMint. ServiceAccountImpersonationURL string + // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation + // token will be valid for. + ServiceAccountImpersonationLifetimeSeconds int // ClientSecret is currently only required if token_info endpoint also // needs to be called with the generated GCP access token. When provided, STS will be // called with additional basic authentication using client_id as username and client_secret as password. @@ -64,20 +67,6 @@ type Config struct { // that include all elements in a given list, in that order. var ( - validTokenURLPatterns = []*regexp.Regexp{ - // The complicated part in the middle matches any number of characters that - // aren't period, spaces, or slashes. 
- regexp.MustCompile(`(?i)^[^\.\s\/\\]+\.sts\.googleapis\.com$`), - regexp.MustCompile(`(?i)^sts\.googleapis\.com$`), - regexp.MustCompile(`(?i)^sts\.[^\.\s\/\\]+\.googleapis\.com$`), - regexp.MustCompile(`(?i)^[^\.\s\/\\]+-sts\.googleapis\.com$`), - } - validImpersonateURLPatterns = []*regexp.Regexp{ - regexp.MustCompile(`^[^\.\s\/\\]+\.iamcredentials\.googleapis\.com$`), - regexp.MustCompile(`^iamcredentials\.googleapis\.com$`), - regexp.MustCompile(`^iamcredentials\.[^\.\s\/\\]+\.googleapis\.com$`), - regexp.MustCompile(`^[^\.\s\/\\]+-iamcredentials\.googleapis\.com$`), - } validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) ) @@ -105,25 +94,13 @@ func validateWorkforceAudience(input string) bool { // TokenSource Returns an external account TokenSource struct. This is to be called by package google to construct a google.Credentials. func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { - return c.tokenSource(ctx, validTokenURLPatterns, validImpersonateURLPatterns, "https") + return c.tokenSource(ctx, "https") } // tokenSource is a private function that's directly called by some of the tests, // because the unit test URLs are mocked, and would otherwise fail the // validity check. -func (c *Config) tokenSource(ctx context.Context, tokenURLValidPats []*regexp.Regexp, impersonateURLValidPats []*regexp.Regexp, scheme string) (oauth2.TokenSource, error) { - valid := validateURL(c.TokenURL, tokenURLValidPats, scheme) - if !valid { - return nil, fmt.Errorf("oauth2/google: invalid TokenURL provided while constructing tokenSource") - } - - if c.ServiceAccountImpersonationURL != "" { - valid := validateURL(c.ServiceAccountImpersonationURL, impersonateURLValidPats, scheme) - if !valid { - return nil, fmt.Errorf("oauth2/google: invalid ServiceAccountImpersonationURL provided while constructing tokenSource") - } - } - +func (c *Config) tokenSource(ctx context.Context, scheme string) (oauth2.TokenSource, error) { if c.WorkforcePoolUserProject != "" { valid := validateWorkforceAudience(c.Audience) if !valid { @@ -141,10 +118,11 @@ func (c *Config) tokenSource(ctx context.Context, tokenURLValidPats []*regexp.Re scopes := c.Scopes ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} imp := ImpersonateTokenSource{ - Ctx: ctx, - URL: c.ServiceAccountImpersonationURL, - Scopes: scopes, - Ts: oauth2.ReuseTokenSource(nil, ts), + Ctx: ctx, + URL: c.ServiceAccountImpersonationURL, + Scopes: scopes, + Ts: oauth2.ReuseTokenSource(nil, ts), + TokenLifetimeSeconds: c.ServiceAccountImpersonationLifetimeSeconds, } return oauth2.ReuseTokenSource(nil, imp), nil } @@ -163,7 +141,7 @@ type format struct { } // CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. -// Either the File or the URL field should be filled, depending on the kind of credential in question. +// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question. // The EnvironmentID should start with AWS if being used for an AWS credential. 
type CredentialSource struct { File string `json:"file"` @@ -171,33 +149,54 @@ type CredentialSource struct { URL string `json:"url"` Headers map[string]string `json:"headers"` + Executable *ExecutableConfig `json:"executable"` + EnvironmentID string `json:"environment_id"` RegionURL string `json:"region_url"` RegionalCredVerificationURL string `json:"regional_cred_verification_url"` CredVerificationURL string `json:"cred_verification_url"` + IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` Format format `json:"format"` } -// parse determines the type of CredentialSource needed +type ExecutableConfig struct { + Command string `json:"command"` + TimeoutMillis *int `json:"timeout_millis"` + OutputFile string `json:"output_file"` +} + +// parse determines the type of CredentialSource needed. func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" { if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil { if awsVersion != 1 { return nil, fmt.Errorf("oauth2/google: aws version '%d' is not supported in the current build", awsVersion) } - return awsCredentialSource{ + + awsCredSource := awsCredentialSource{ EnvironmentID: c.CredentialSource.EnvironmentID, RegionURL: c.CredentialSource.RegionURL, RegionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL, CredVerificationURL: c.CredentialSource.URL, TargetResource: c.Audience, ctx: ctx, - }, nil + } + if c.CredentialSource.IMDSv2SessionTokenURL != "" { + awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL + } + + if err := awsCredSource.validateMetadataServers(); err != nil { + return nil, err + } + + return awsCredSource, nil } } else if c.CredentialSource.File != "" { return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil } else if c.CredentialSource.URL != "" { return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil + } else if c.CredentialSource.Executable != nil { + return CreateExecutableCredential(ctx, c.CredentialSource.Executable, c) } return nil, fmt.Errorf("oauth2/google: unable to parse credential source") } diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go new file mode 100644 index 000000000..579bcce5f --- /dev/null +++ b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go @@ -0,0 +1,309 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "regexp" + "strings" + "time" +) + +var serviceAccountImpersonationRE = regexp.MustCompile("https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken") + +const ( + executableSupportedMaxVersion = 1 + defaultTimeout = 30 * time.Second + timeoutMinimum = 5 * time.Second + timeoutMaximum = 120 * time.Second + executableSource = "response" + outputFileSource = "output file" +) + +type nonCacheableError struct { + message string +} + +func (nce nonCacheableError) Error() string { + return nce.message +} + +func missingFieldError(source, field string) error { + return fmt.Errorf("oauth2/google: %v missing `%q` field", source, field) +} + +func jsonParsingError(source, data string) error { + return fmt.Errorf("oauth2/google: unable to parse %v\nResponse: %v", source, data) +} + +func malformedFailureError() error { + return nonCacheableError{"oauth2/google: response must include `error` and `message` fields when unsuccessful"} +} + +func userDefinedError(code, message string) error { + return nonCacheableError{fmt.Sprintf("oauth2/google: response contains unsuccessful response: (%v) %v", code, message)} +} + +func unsupportedVersionError(source string, version int) error { + return fmt.Errorf("oauth2/google: %v contains unsupported version: %v", source, version) +} + +func tokenExpiredError() error { + return nonCacheableError{"oauth2/google: the token returned by the executable is expired"} +} + +func tokenTypeError(source string) error { + return fmt.Errorf("oauth2/google: %v contains unsupported token type", source) +} + +func exitCodeError(exitCode int) error { + return fmt.Errorf("oauth2/google: executable command failed with exit code %v", exitCode) +} + +func executableError(err error) error { + return fmt.Errorf("oauth2/google: executable command failed: %v", err) +} + +func executablesDisallowedError() error { + return errors.New("oauth2/google: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") +} + +func timeoutRangeError() error { + return errors.New("oauth2/google: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds") +} + +func commandMissingError() error { + return errors.New("oauth2/google: missing `command` field — executable command must be provided") +} + +type environment interface { + existingEnv() []string + getenv(string) string + run(ctx context.Context, command string, env []string) ([]byte, error) + now() time.Time +} + +type runtimeEnvironment struct{} + +func (r runtimeEnvironment) existingEnv() []string { + return os.Environ() +} + +func (r runtimeEnvironment) getenv(key string) string { + return os.Getenv(key) +} + +func (r runtimeEnvironment) now() time.Time { + return time.Now().UTC() +} + +func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) { + splitCommand := strings.Fields(command) + cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...) 
+ cmd.Env = env + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + if ctx.Err() == context.DeadlineExceeded { + return nil, context.DeadlineExceeded + } + + if exitError, ok := err.(*exec.ExitError); ok { + return nil, exitCodeError(exitError.ExitCode()) + } + + return nil, executableError(err) + } + + bytesStdout := bytes.TrimSpace(stdout.Bytes()) + if len(bytesStdout) > 0 { + return bytesStdout, nil + } + return bytes.TrimSpace(stderr.Bytes()), nil +} + +type executableCredentialSource struct { + Command string + Timeout time.Duration + OutputFile string + ctx context.Context + config *Config + env environment +} + +// CreateExecutableCredential creates an executableCredentialSource given an ExecutableConfig. +// It also performs defaulting and type conversions. +func CreateExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) { + if ec.Command == "" { + return executableCredentialSource{}, commandMissingError() + } + + result := executableCredentialSource{} + result.Command = ec.Command + if ec.TimeoutMillis == nil { + result.Timeout = defaultTimeout + } else { + result.Timeout = time.Duration(*ec.TimeoutMillis) * time.Millisecond + if result.Timeout < timeoutMinimum || result.Timeout > timeoutMaximum { + return executableCredentialSource{}, timeoutRangeError() + } + } + result.OutputFile = ec.OutputFile + result.ctx = ctx + result.config = config + result.env = runtimeEnvironment{} + return result, nil +} + +type executableResponse struct { + Version int `json:"version,omitempty"` + Success *bool `json:"success,omitempty"` + TokenType string `json:"token_type,omitempty"` + ExpirationTime int64 `json:"expiration_time,omitempty"` + IdToken string `json:"id_token,omitempty"` + SamlResponse string `json:"saml_response,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (cs executableCredentialSource) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) { + var result executableResponse + if err := json.Unmarshal(response, &result); err != nil { + return "", jsonParsingError(source, string(response)) + } + + if result.Version == 0 { + return "", missingFieldError(source, "version") + } + + if result.Success == nil { + return "", missingFieldError(source, "success") + } + + if !*result.Success { + if result.Code == "" || result.Message == "" { + return "", malformedFailureError() + } + return "", userDefinedError(result.Code, result.Message) + } + + if result.Version > executableSupportedMaxVersion || result.Version < 0 { + return "", unsupportedVersionError(source, result.Version) + } + + if result.ExpirationTime == 0 && cs.OutputFile != "" { + return "", missingFieldError(source, "expiration_time") + } + + if result.TokenType == "" { + return "", missingFieldError(source, "token_type") + } + + if result.ExpirationTime != 0 && result.ExpirationTime < now { + return "", tokenExpiredError() + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:jwt" || result.TokenType == "urn:ietf:params:oauth:token-type:id_token" { + if result.IdToken == "" { + return "", missingFieldError(source, "id_token") + } + return result.IdToken, nil + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:saml2" { + if result.SamlResponse == "" { + return "", missingFieldError(source, "saml_response") + } + return result.SamlResponse, nil + } + + return "", tokenTypeError(source) 
+} + +func (cs executableCredentialSource) subjectToken() (string, error) { + if token, err := cs.getTokenFromOutputFile(); token != "" || err != nil { + return token, err + } + + return cs.getTokenFromExecutableCommand() +} + +func (cs executableCredentialSource) getTokenFromOutputFile() (token string, err error) { + if cs.OutputFile == "" { + // This ExecutableCredentialSource doesn't use an OutputFile. + return "", nil + } + + file, err := os.Open(cs.OutputFile) + if err != nil { + // No OutputFile found. Hasn't been created yet, so skip it. + return "", nil + } + defer file.Close() + + data, err := ioutil.ReadAll(io.LimitReader(file, 1<<20)) + if err != nil || len(data) == 0 { + // Cachefile exists, but no data found. Get new credential. + return "", nil + } + + token, err = cs.parseSubjectTokenFromSource(data, outputFileSource, cs.env.now().Unix()) + if err != nil { + if _, ok := err.(nonCacheableError); ok { + // If the cached token is expired we need a new token, + // and if the cache contains a failure, we need to try again. + return "", nil + } + + // There was an error in the cached token, and the developer should be aware of it. + return "", err + } + // Token parsing succeeded. Use found token. + return token, nil +} + +func (cs executableCredentialSource) executableEnvironment() []string { + result := cs.env.existingEnv() + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", cs.config.Audience)) + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", cs.config.SubjectTokenType)) + result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0") + if cs.config.ServiceAccountImpersonationURL != "" { + matches := serviceAccountImpersonationRE.FindStringSubmatch(cs.config.ServiceAccountImpersonationURL) + if matches != nil { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1])) + } + } + if cs.OutputFile != "" { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", cs.OutputFile)) + } + return result +} + +func (cs executableCredentialSource) getTokenFromExecutableCommand() (string, error) { + // For security reasons, we need our consumers to set this environment variable to allow executables to be run. + if cs.env.getenv("GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES") != "1" { + return "", executablesDisallowedError() + } + + ctx, cancel := context.WithDeadline(cs.ctx, cs.env.now().Add(cs.Timeout)) + defer cancel() + + output, err := cs.env.run(ctx, cs.Command, cs.executableEnvironment()) + if err != nil { + return "", err + } + return cs.parseSubjectTokenFromSource(output, executableSource, cs.env.now().Unix()) +} diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go index 8251fc85e..54c8f209f 100644 --- a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go +++ b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go @@ -48,12 +48,19 @@ type ImpersonateTokenSource struct { // Each service account must be granted roles/iam.serviceAccountTokenCreator // on the next service account in the chain. Optional. Delegates []string + // TokenLifetimeSeconds is the number of seconds the impersonation token will + // be valid for. + TokenLifetimeSeconds int } // Token performs the exchange to get a temporary service account token to allow access to GCP. 
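Reviewer note: per parseSubjectTokenFromSource above, a helper executable must print a JSON object with mandatory `version` and `success` fields, a `token_type` selecting which token field is read, and, when an output file is configured, an `expiration_time`. A toy helper binary emitting a conforming success response (the token value is a placeholder):

```go
package main

import (
	"encoding/json"
	"os"
	"time"
)

// Emits the response shape the parser above accepts for a JWT/id_token
// subject token; saml2 responses would set "saml_response" instead.
func main() {
	resp := map[string]interface{}{
		"version":         1, // executableSupportedMaxVersion
		"success":         true,
		"token_type":      "urn:ietf:params:oauth:token-type:jwt",
		"id_token":        "eyJhbGciOi...", // placeholder subject token
		"expiration_time": time.Now().Add(time.Hour).Unix(),
	}
	json.NewEncoder(os.Stdout).Encode(resp)
}
```

Note that, as the getTokenFromExecutableCommand hunk below shows, the library refuses to run such a command unless GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES=1 is set, and it passes the audience and token type to the executable via GOOGLE_EXTERNAL_ACCOUNT_* environment variables.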
func (its ImpersonateTokenSource) Token() (*oauth2.Token, error) { + lifetimeString := "3600s" + if its.TokenLifetimeSeconds != 0 { + lifetimeString = fmt.Sprintf("%ds", its.TokenLifetimeSeconds) + } reqBody := generateAccessTokenReq{ - Lifetime: "3600s", + Lifetime: lifetimeString, Scope: its.Scopes, Delegates: its.Delegates, } diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/jwt.go b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/jwt.go index 67d97b990..e89e6ae17 100644 --- a/prometheus-to-sd/vendor/golang.org/x/oauth2/google/jwt.go +++ b/prometheus-to-sd/vendor/golang.org/x/oauth2/google/jwt.go @@ -66,7 +66,8 @@ func newJWTSource(jsonKey []byte, audience string, scopes []string) (oauth2.Toke if err != nil { return nil, err } - return oauth2.ReuseTokenSource(tok, ts), nil + rts := newErrWrappingTokenSource(oauth2.ReuseTokenSource(tok, ts)) + return rts, nil } type jwtAccessTokenSource struct { diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/internal/token.go b/prometheus-to-sd/vendor/golang.org/x/oauth2/internal/token.go index 355c38696..b4723fcac 100644 --- a/prometheus-to-sd/vendor/golang.org/x/oauth2/internal/token.go +++ b/prometheus-to-sd/vendor/golang.org/x/oauth2/internal/token.go @@ -19,8 +19,6 @@ import ( "strings" "sync" "time" - - "golang.org/x/net/context/ctxhttp" ) // Token represents the credentials used to authorize @@ -229,7 +227,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, } func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { - r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) + r, err := ContextClient(ctx).Do(req.WithContext(ctx)) if err != nil { return nil, err } diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/jws/jws.go b/prometheus-to-sd/vendor/golang.org/x/oauth2/jws/jws.go index 683d2d271..95015648b 100644 --- a/prometheus-to-sd/vendor/golang.org/x/oauth2/jws/jws.go +++ b/prometheus-to-sd/vendor/golang.org/x/oauth2/jws/jws.go @@ -178,5 +178,5 @@ func Verify(token string, key *rsa.PublicKey) error { h := sha256.New() h.Write([]byte(signedContent)) - return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString) } diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/oauth2.go b/prometheus-to-sd/vendor/golang.org/x/oauth2/oauth2.go index 291df5c83..9085fabe3 100644 --- a/prometheus-to-sd/vendor/golang.org/x/oauth2/oauth2.go +++ b/prometheus-to-sd/vendor/golang.org/x/oauth2/oauth2.go @@ -16,6 +16,7 @@ import ( "net/url" "strings" "sync" + "time" "golang.org/x/oauth2/internal" ) @@ -140,7 +141,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // // State is a token to protect the user from CSRF attacks. You must // always provide a non-empty string and validate that it matches the -// the state query parameter on your redirect callback. +// state query parameter on your redirect callback. // See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. 
// // Opts may include AccessTypeOnline or AccessTypeOffline, as well @@ -290,6 +291,8 @@ type reuseTokenSource struct { mu sync.Mutex // guards t t *Token + + expiryDelta time.Duration } // Token returns the current token if it's still valid, else will @@ -305,6 +308,7 @@ func (s *reuseTokenSource) Token() (*Token, error) { if err != nil { return nil, err } + t.expiryDelta = s.expiryDelta s.t = t return t, nil } @@ -379,3 +383,30 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { new: src, } } + +// ReuseTokenSource returns a TokenSource that acts in the same manner as the +// TokenSource returned by ReuseTokenSource, except the expiry buffer is +// configurable. The expiration time of a token is calculated as +// t.Expiry.Add(-earlyExpiry). +func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly, but set the expiryDelta to earlyExpiry, + // so the behavior matches what the user expects. + rt.expiryDelta = earlyExpiry + return rt + } + src = rt.new + } + if t != nil { + t.expiryDelta = earlyExpiry + } + return &reuseTokenSource{ + t: t, + new: src, + expiryDelta: earlyExpiry, + } +} diff --git a/prometheus-to-sd/vendor/golang.org/x/oauth2/token.go b/prometheus-to-sd/vendor/golang.org/x/oauth2/token.go index 822720341..7c64006de 100644 --- a/prometheus-to-sd/vendor/golang.org/x/oauth2/token.go +++ b/prometheus-to-sd/vendor/golang.org/x/oauth2/token.go @@ -16,10 +16,10 @@ import ( "golang.org/x/oauth2/internal" ) -// expiryDelta determines how earlier a token should be considered +// defaultExpiryDelta determines how earlier a token should be considered // expired than its actual expiration time. It is used to avoid late // expirations due to client-server time mismatches. -const expiryDelta = 10 * time.Second +const defaultExpiryDelta = 10 * time.Second // Token represents the credentials used to authorize // the requests to access protected resources on the OAuth 2.0 @@ -52,6 +52,11 @@ type Token struct { // raw optionally contains extra metadata from the server // when updating a token. raw interface{} + + // expiryDelta is used to calculate when a token is considered + // expired, by subtracting from Expiry. If zero, defaultExpiryDelta + // is used. + expiryDelta time.Duration } // Type returns t.TokenType if non-empty, else "Bearer". @@ -127,6 +132,11 @@ func (t *Token) expired() bool { if t.Expiry.IsZero() { return false } + + expiryDelta := defaultExpiryDelta + if t.expiryDelta != 0 { + expiryDelta = t.expiryDelta + } return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow()) } diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/ioctl_signed.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/ioctl_signed.go new file mode 100644 index 000000000..7def9580e --- /dev/null +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -0,0 +1,70 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || solaris +// +build aix solaris + +package unix + +import ( + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. 
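Reviewer note: the oauth2.go and token.go hunks below introduce a per-token expiryDelta so ReuseTokenSourceWithExpiry can make tokens refresh earlier than the fixed 10-second default. A minimal sketch of the expiry check as it reads after the change, using a standalone copy of the logic:

```go
package main

import (
	"fmt"
	"time"
)

// expired mirrors Token.expired() after the change below: a token is treated
// as expired expiryDelta before its real Expiry; a zero delta falls back to
// the 10s defaultExpiryDelta.
func expired(expiry time.Time, expiryDelta time.Duration) bool {
	const defaultExpiryDelta = 10 * time.Second
	if expiry.IsZero() {
		return false
	}
	if expiryDelta == 0 {
		expiryDelta = defaultExpiryDelta
	}
	return expiry.Round(0).Add(-expiryDelta).Before(time.Now())
}

func main() {
	expiry := time.Now().Add(30 * time.Second)
	fmt.Println(expired(expiry, 0))             // false: default 10s buffer
	fmt.Println(expired(expiry, 1*time.Minute)) // true: 60s early-expiry buffer
}
```

Callers opt in with oauth2.ReuseTokenSourceWithExpiry(nil, src, earlyExpiry); plain ReuseTokenSource keeps the old behavior since the zero delta maps to the default.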
+ +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req int, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +// IoctlSetPointerInt performs an ioctl operation which sets an +// integer value on fd, using the specified request number. The ioctl +// argument is called with a pointer to the integer value, rather than +// passing the integer value directly. +func IoctlSetPointerInt(fd int, req int, value int) error { + v := int32(value) + return ioctlPtr(fd, req, unsafe.Pointer(&v)) +} + +// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. +// +// To change fd's window size, the req argument should be TIOCSWINSZ. +func IoctlSetWinsize(fd int, req int, value *Winsize) error { + // TODO: if we get the chance, remove the req parameter and + // hardcode TIOCSWINSZ. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlSetTermios performs an ioctl on fd with a *Termios. +// +// The req value will usually be TCSETA or TIOCSETA. +func IoctlSetTermios(fd int, req int, value *Termios) error { + // TODO: if we get the chance, remove the req parameter. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. +func IoctlGetInt(fd int, req int) (int, error) { + var value int + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return value, err +} + +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { + var value Winsize + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} + +func IoctlGetTermios(fd int, req int) (*Termios, error) { + var value Termios + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/ioctl.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/ioctl_unsigned.go similarity index 92% rename from prometheus-to-sd/vendor/golang.org/x/sys/unix/ioctl.go rename to prometheus-to-sd/vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 7ce8dd406..649913d1e 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/ioctl.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd hurd linux netbsd openbsd solaris +//go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd +// +build darwin dragonfly freebsd hurd linux netbsd openbsd package unix diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/ioctl_zos.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/ioctl_zos.go index 6532f09af..cdc21bf76 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -17,14 +17,14 @@ import ( // IoctlSetInt performs an ioctl operation which sets an integer value // on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { +func IoctlSetInt(fd int, req int, value int) error { return ioctl(fd, req, uintptr(value)) } // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. 
// // To change fd's window size, the req argument should be TIOCSWINSZ. -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { +func IoctlSetWinsize(fd int, req int, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. return ioctlPtr(fd, req, unsafe.Pointer(value)) @@ -33,7 +33,7 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // IoctlSetTermios performs an ioctl on fd with a *Termios. // // The req value is expected to be TCSETS, TCSETSW, or TCSETSF -func IoctlSetTermios(fd int, req uint, value *Termios) error { +func IoctlSetTermios(fd int, req int, value *Termios) error { if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) { return ENOSYS } @@ -47,13 +47,13 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // // A few ioctl requests use the return value as an output parameter; // for those, IoctlRetInt should be used instead of this function. -func IoctlGetInt(fd int, req uint) (int, error) { +func IoctlGetInt(fd int, req int) (int, error) { var value int err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { var value Winsize err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err @@ -62,7 +62,7 @@ func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { // IoctlGetTermios performs an ioctl on fd with a *Termios. // // The req value is expected to be TCGETS -func IoctlGetTermios(fd int, req uint) (*Termios, error) { +func IoctlGetTermios(fd int, req int) (*Termios, error) { var value Termios if req != TCGETS { return &value, ENOSYS diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/mkerrors.sh b/prometheus-to-sd/vendor/golang.org/x/sys/unix/mkerrors.sh index 7456d9ddd..2045d3dad 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -66,6 +66,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -521,6 +522,7 @@ ccflags="$@" $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || $2 ~ /^RAW_PAYLOAD_/ || + $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || $2 ~ /^ICMPV?6?_(FILTER|SEC)/ || diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_aix.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_aix.go index d9f5544cc..c406ae00f 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -408,8 +408,8 @@ func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 } func (w WaitStatus) TrapCause() int { return -1 } -//sys ioctl(fd int, req uint, arg uintptr) (err error) -//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = ioctl +//sys ioctl(fd int, req int, arg uintptr) (err error) +//sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = ioctl // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX // There is no way to create a custom fcntl and to keep //sys fcntl easily, diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index e92a0be16..f2871fa95 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -8,7 +8,6 @@ package unix 
//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = getrlimit64 -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) = setrlimit64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek64 //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 16eed1709..75718ec0f 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -8,7 +8,6 @@ package unix //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) = mmap64 diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_darwin.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_darwin.go index 7064d6eba..206921504 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -613,6 +613,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) +//sys Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) //sys Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) @@ -622,7 +623,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Setprivexec(flag int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -676,7 +676,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { // Kqueue_from_portset_np // Kqueue_portset // Getattrlist -// Setattrlist // Getdirentriesattr // Searchfs // Delete diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 221efc26b..d4ce988e7 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -326,7 +326,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 5bdde03e4..afb10106f 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ 
-433,7 +433,6 @@ func Dup3(oldfd, newfd, flags int) error { //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux.go index 973533153..fbaeb5fff 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1873,7 +1873,6 @@ func Getpgrp() (pid int) { //sys OpenTree(dfd int, fileName string, flags uint) (r int, err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT -//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 //sys read(fd int, p []byte) (n int, err error) @@ -1887,6 +1886,15 @@ func Getpgrp() (pid int) { //sysnb Settimeofday(tv *Timeval) (err error) //sys Setns(fd int, nstype int) (err error) +//go:linkname syscall_prlimit syscall.prlimit +func syscall_prlimit(pid, resource int, newlimit, old *syscall.Rlimit) error + +func Prlimit(pid, resource int, newlimit, old *Rlimit) error { + // Just call the syscall version, because as of Go 1.21 + // it will affect starting a new process. + return syscall_prlimit(pid, resource, (*syscall.Rlimit)(newlimit), (*syscall.Rlimit)(old)) +} + // PrctlRetInt performs a prctl operation specified by option and further // optional arguments arg2 through arg5 depending on option. It returns a // non-negative integer that is returned by the prctl syscall. 
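Reviewer note: from here on the x/sys/unix hunks remove the per-arch Setrlimit/setrlimit wrappers and route Prlimit (and, later in syscall_unix.go, a portable Setrlimit) through the standard syscall package, so that limit changes are also honored when the Go runtime starts child processes, per the Go 1.21 note in the comment above. A usage sketch with the public x/sys/unix API, raising the soft open-file limit to the hard limit:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// After this change unix.Setrlimit delegates to syscall.Setrlimit, so the
// raised limit below is also reflected in processes started via os/exec.
func main() {
	var lim unix.Rlimit
	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &lim); err != nil {
		panic(err)
	}
	fmt.Printf("before: soft=%d hard=%d\n", lim.Cur, lim.Max)

	lim.Cur = lim.Max // raise the soft limit to the hard limit
	if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &lim); err != nil {
		panic(err)
	}
}
```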
diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_386.go index ff5b5899d..c7d9945ea 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -97,33 +97,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { newoffset, errno := seek(fd, offset, whence) if errno != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 9b2703532..5b21fcfd7 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -46,7 +46,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 856ad1d63..da2986415 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -171,33 +171,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint64 { return uint64(r.Uregs[15]) } func (r *PtraceRegs) SetPC(pc uint64) { r.Uregs[15] = uint32(pc) } diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 6422704bc..a81f5742b 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -39,7 +39,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset 
*int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) @@ -143,15 +142,6 @@ func Getrlimit(resource int, rlim *Rlimit) error { return getrlimit(resource, rlim) } -// Setrlimit prefers the prlimit64 system call. See issue 38604. -func Setrlimit(resource int, rlim *Rlimit) error { - err := Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - return setrlimit(resource, rlim) -} - func (r *PtraceRegs) PC() uint64 { return r.Pc } func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc } diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 59dab510e..69d2d7c3d 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -126,11 +126,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - return -} - func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { if tv == nil { return utimensat(dirfd, path, nil, 0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index bfef09a39..76d564095 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -37,7 +37,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index ab3025096..aae7f0ffd 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -151,33 +151,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint64 { return r.Epc } func (r *PtraceRegs) SetPC(pc uint64) { r.Epc = pc } diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go 
b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index eac1cf1ac..66eff19a3 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -159,33 +159,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint32 { return r.Nip } func (r *PtraceRegs) SetPC(pc uint32) { r.Nip = pc } diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 4df56616b..806aa2574 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -34,7 +34,6 @@ package unix //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 5f4243dea..35851ef70 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -38,7 +38,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index d0a7d4066..2f89e8f5d 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -34,7 +34,6 @@ import ( //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go 
b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index f5c793be2..7ca064ae7 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -31,7 +31,6 @@ package unix //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_netbsd.go index e66865dcc..018d7d478 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -340,7 +340,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -501,7 +500,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { // compat_43_osendmsg // compat_43_osethostid // compat_43_osethostname -// compat_43_osetrlimit // compat_43_osigblock // compat_43_osigsetmask // compat_43_osigstack diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 5e9de23ae..f9c7a9663 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -294,7 +294,6 @@ func Uname(uname *Utsname) error { //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setrtable(rtable int) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_solaris.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_solaris.go index d3444b64d..b600a289d 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -545,24 +545,24 @@ func Minor(dev uint64) uint32 { * Expose the ioctl function */ -//sys ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) = libc.ioctl -//sys ioctlPtrRet(fd int, req uint, arg unsafe.Pointer) (ret int, err error) = libc.ioctl +//sys ioctlRet(fd int, req int, arg uintptr) (ret int, err error) = libc.ioctl +//sys ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) = libc.ioctl -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { _, err = ioctlRet(fd, req, arg) return err } -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { _, err = ioctlPtrRet(fd, req, arg) return err } -func IoctlSetTermio(fd int, req uint, value 
*Termio) error { +func IoctlSetTermio(fd int, req int, value *Termio) error { return ioctlPtr(fd, req, unsafe.Pointer(value)) } -func IoctlGetTermio(fd int, req uint) (*Termio, error) { +func IoctlGetTermio(fd int, req int) (*Termio, error) { var value Termio err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err @@ -665,7 +665,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Setuid(uid int) (err error) //sys Shutdown(s int, how int) (err error) = libsocket.shutdown @@ -1080,11 +1079,11 @@ func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags return retCl, retData, flags, nil } -func IoctlSetIntRetInt(fd int, req uint, arg int) (int, error) { +func IoctlSetIntRetInt(fd int, req int, arg int) (int, error) { return ioctlRet(fd, req, uintptr(arg)) } -func IoctlSetString(fd int, req uint, val string) error { +func IoctlSetString(fd int, req int, val string) error { bs := make([]byte, len(val)+1) copy(bs[:len(bs)-1], val) err := ioctlPtr(fd, req, unsafe.Pointer(&bs[0])) @@ -1120,7 +1119,7 @@ func (l *Lifreq) GetLifruUint() uint { return *(*uint)(unsafe.Pointer(&l.Lifru[0])) } -func IoctlLifreq(fd int, req uint, l *Lifreq) error { +func IoctlLifreq(fd int, req int, l *Lifreq) error { return ioctlPtr(fd, req, unsafe.Pointer(l)) } @@ -1131,6 +1130,6 @@ func (s *Strioctl) SetInt(i int) { s.Dp = (*int8)(unsafe.Pointer(&i)) } -func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) { +func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_unix.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_unix.go index 00f0aa375..8e48c29ec 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -587,3 +587,10 @@ func emptyIovecs(iov []Iovec) bool { } return true } + +// Setrlimit sets a resource limit. +func Setrlimit(resource int, rlim *Rlimit) error { + // Just call the syscall version, because as of Go 1.21 + // it will affect starting a new process. 
+ return syscall.Setrlimit(resource, (*syscall.Rlimit)(rlim)) +} diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index b295497ae..d3d49ec3e 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -212,8 +212,8 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___SENDMSG_A //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) = SYS_MMAP //sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP -//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL -//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL +//sys ioctl(fd int, req int, arg uintptr) (err error) = SYS_IOCTL +//sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A //sys Chdir(path string) (err error) = SYS___CHDIR_A diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 476a1c7e7..143007627 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1270,6 +1270,16 @@ const ( SEEK_END = 0x2 SEEK_HOLE = 0x3 SEEK_SET = 0x0 + SF_APPEND = 0x40000 + SF_ARCHIVED = 0x10000 + SF_DATALESS = 0x40000000 + SF_FIRMLINK = 0x800000 + SF_IMMUTABLE = 0x20000 + SF_NOUNLINK = 0x100000 + SF_RESTRICTED = 0x80000 + SF_SETTABLE = 0x3fff0000 + SF_SUPPORTED = 0x9f0000 + SF_SYNTHETIC = 0xc0000000 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1543,6 +1553,15 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UF_APPEND = 0x4 + UF_COMPRESSED = 0x20 + UF_DATAVAULT = 0x80 + UF_HIDDEN = 0x8000 + UF_IMMUTABLE = 0x2 + UF_NODUMP = 0x1 + UF_OPAQUE = 0x8 + UF_SETTABLE = 0xffff + UF_TRACKED = 0x40 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index e36f5178d..ab044a742 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1270,6 +1270,16 @@ const ( SEEK_END = 0x2 SEEK_HOLE = 0x3 SEEK_SET = 0x0 + SF_APPEND = 0x40000 + SF_ARCHIVED = 0x10000 + SF_DATALESS = 0x40000000 + SF_FIRMLINK = 0x800000 + SF_IMMUTABLE = 0x20000 + SF_NOUNLINK = 0x100000 + SF_RESTRICTED = 0x80000 + SF_SETTABLE = 0x3fff0000 + SF_SUPPORTED = 0x9f0000 + SF_SYNTHETIC = 0xc0000000 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1543,6 +1553,15 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UF_APPEND = 0x4 + UF_COMPRESSED = 0x20 + UF_DATAVAULT = 0x80 + UF_HIDDEN = 0x8000 + UF_IMMUTABLE = 0x2 + UF_NODUMP = 0x1 + UF_OPAQUE = 0x8 + UF_SETTABLE = 0xffff + UF_TRACKED = 0x40 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index ef9dcd1be..9a257219d 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -124,7 +124,6 
@@ int utime(uintptr_t, uintptr_t); unsigned long long getsystemcfg(int); int umount(uintptr_t); int getrlimit64(int, uintptr_t); -int setrlimit64(int, uintptr_t); long long lseek64(int, long long, int); uintptr_t mmap(uintptr_t, uintptr_t, int, int, int, long long); @@ -213,7 +212,7 @@ func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(arg)) if r0 == -1 && er != nil { err = er @@ -223,7 +222,7 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(uintptr(arg))) if r0 == -1 && er != nil { err = er @@ -1464,16 +1463,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - r0, er := C.setrlimit64(C.int(resource), C.uintptr_t(uintptr(unsafe.Pointer(rlim)))) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Seek(fd int, offset int64, whence int) (off int64, err error) { r0, er := C.lseek64(C.int(fd), C.longlong(offset), C.int(whence)) off = int64(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index f86a94592..6de80c20c 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -93,8 +93,8 @@ func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, e1 := callioctl(fd, int(req), arg) +func ioctl(fd int, req int, arg uintptr) (err error) { + _, e1 := callioctl(fd, req, arg) if e1 != 0 { err = errnoErr(e1) } @@ -103,8 +103,8 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { - _, e1 := callioctl_ptr(fd, int(req), arg) +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { + _, e1 := callioctl_ptr(fd, req, arg) if e1 != 0 { err = errnoErr(e1) } @@ -1422,16 +1422,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, e1 := callsetrlimit(resource, uintptr(unsafe.Pointer(rlim))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Seek(fd int, offset int64, whence int) (off int64, err error) { r0, e1 := calllseek(fd, offset, whence) off = int64(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index d32a84cae..c4d50ae50 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ 
b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -124,7 +124,6 @@ import ( //go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" //go:cgo_import_dynamic libc_umount umount "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.a/shr_64.o" -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.a/shr_64.o" //go:cgo_import_dynamic libc_lseek lseek "libc.a/shr_64.o" //go:cgo_import_dynamic libc_mmap64 mmap64 "libc.a/shr_64.o" @@ -242,7 +241,6 @@ import ( //go:linkname libc_getsystemcfg libc_getsystemcfg //go:linkname libc_umount libc_umount //go:linkname libc_getrlimit libc_getrlimit -//go:linkname libc_setrlimit libc_setrlimit //go:linkname libc_lseek libc_lseek //go:linkname libc_mmap64 libc_mmap64 @@ -363,7 +361,6 @@ var ( libc_getsystemcfg, libc_umount, libc_getrlimit, - libc_setrlimit, libc_lseek, libc_mmap64 syscallFunc ) @@ -1179,13 +1176,6 @@ func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { - r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_setrlimit)), 2, uintptr(resource), rlim, 0, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func calllseek(fd int, offset int64, whence int) (r1 uintptr, e1 Errno) { r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_lseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) return diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index d7d8baf81..6903d3b09 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -123,7 +123,6 @@ int utime(uintptr_t, uintptr_t); unsigned long long getsystemcfg(int); int umount(uintptr_t); int getrlimit(int, uintptr_t); -int setrlimit(int, uintptr_t); long long lseek(int, long long, int); uintptr_t mmap64(uintptr_t, uintptr_t, int, int, int, long long); @@ -131,6 +130,7 @@ uintptr_t mmap64(uintptr_t, uintptr_t, int, int, int, long long); import "C" import ( "syscall" + "unsafe" ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1055,14 +1055,6 @@ func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.setrlimit(C.int(resource), C.uintptr_t(rlim))) - e1 = syscall.GetErrno() - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func calllseek(fd int, offset int64, whence int) (r1 uintptr, e1 Errno) { r1 = uintptr(C.lseek(C.int(fd), C.longlong(offset), C.int(whence))) e1 = syscall.GetErrno() diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index a29ffdd56..4037ccf7a 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -1992,6 +1992,31 @@ var libc_select_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err 
!= nil { + return + } + var _p1 unsafe.Pointer + if len(attrBuf) > 0 { + _p1 = unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2123,20 +2148,6 @@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 95fe4c0eb..4baaed0bc 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -705,6 +705,11 @@ TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) @@ -759,12 +764,6 @@ TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 2fd4590bb..51d6f3fb2 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1992,6 +1992,31 @@ var libc_select_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(attrBuf) > 0 { + _p1 = unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, 
uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2123,20 +2148,6 @@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index efa5b4c98..c3b82c037 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -705,6 +705,11 @@ TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) @@ -759,12 +764,6 @@ TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 3b8513470..0eabac7ad 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -1410,16 +1410,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 112906562..ee313eb00 100644 --- 
a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 55f5abfe5..4c986e448 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index d39651c2b..555216944 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index ddb740868..67a226fbf 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index 09a53a616..f0b9ddaaa 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ 
b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -1645,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 430cb24de..da63d9d78 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1346,16 +1346,6 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index c81b0ad47..07b549cc2 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -411,16 +411,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 2206bce7f..5f481bf83 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -334,16 +334,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index edf6b39f1..824cd52c7 100644 --- 
a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -578,16 +578,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func armSyncFileRange(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_ARM_SYNC_FILE_RANGE, uintptr(fd), uintptr(flags), uintptr(off), uintptr(off>>32), uintptr(n), uintptr(n>>32)) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 190609f21..e77aecfe9 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -289,16 +289,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 5f984cbb1..961a3afb7 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -644,16 +644,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 46fc380a4..ed05005e9 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -278,16 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go 
b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index cbd0d4dad..d365b718f 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -278,16 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 0c13d15f0..c3f1b8bbd 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -644,16 +644,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index e01432aed..a6574cf98 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -624,16 +624,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off>>32), uintptr(off), uintptr(n>>32), uintptr(n)) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 13c7ee7ba..f40990264 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -349,16 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git 
a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 02d0c0fd6..9dfcc2997 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -349,16 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 9fee3b1d2..0b2923958 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -269,16 +269,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 647bbfecd..6cde32237 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -319,16 +319,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int64(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index ada057f89..5253d65bf 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -329,16 +329,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) 
{ _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 8e1d9c8f6..cdb2af5ae 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 21c695040..9d25f76b0 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 298168f90..d3f803516 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 68b8bd492..887188a52 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -1607,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go 
b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 0b0f910e1..6699a783e 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 087444250..04f0de34b 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 48ff5de75..1e775fe05 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 5782cd108..27b6f4df7 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, 
$libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 2452a641d..7f6427899 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index cf310420c..b797045fd 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 5e35600a6..756ef7b17 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index 484bb42e0..a87126622 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA 
·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index b04cef1a1..7bc2e24eb 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index 55af27263..05d4bffd7 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 47a07ee0c..739be6217 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 4028255b0..74a25f8d6 100644 --- 
a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -687,12 +687,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - CALL libc_setrlimit(SB) - RET -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_setrtable(SB) RET diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 573378fdb..7d95a1978 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -1894,20 +1894,6 @@ var libc_setresuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setrtable(rtable int) (err error) { _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index e1fbd4dfa..990be2457 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -573,11 +573,6 @@ TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setrtable(SB) GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 4873a1e5d..609d1c598 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -110,7 +110,6 @@ import ( //go:cgo_import_dynamic libc_setpriority setpriority "libc.so" //go:cgo_import_dynamic libc_setregid setregid "libc.so" //go:cgo_import_dynamic libc_setreuid setreuid "libc.so" -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" //go:cgo_import_dynamic libc_setsid setsid "libc.so" //go:cgo_import_dynamic libc_setuid setuid "libc.so" //go:cgo_import_dynamic libc_shutdown shutdown "libsocket.so" @@ -250,7 +249,6 @@ import ( //go:linkname procSetpriority libc_setpriority //go:linkname procSetregid libc_setregid //go:linkname procSetreuid libc_setreuid -//go:linkname procSetrlimit libc_setrlimit 
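// Illustrative sketch, not part of the generated patch: every hunk in this
// range drops a raw Setrlimit/setrlimit wrapper from a zsyscall file. The
// exported unix.Setrlimit API itself survives upstream (it is assumed here,
// per the upstream x/sys change and not shown in this diff, to delegate to
// syscall.Setrlimit so the Go runtime's cached RLIMIT_NOFILE value stays
// consistent), so existing callers keep compiling unchanged:
//
//	package main
//
//	import (
//		"log"
//
//		"golang.org/x/sys/unix"
//	)
//
//	func main() {
//		// Raise the soft and hard open-file limits to a hypothetical 8192.
//		lim := unix.Rlimit{Cur: 8192, Max: 8192}
//		if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &lim); err != nil {
//			log.Fatalf("setrlimit: %v", err)
//		}
//	}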
//go:linkname procSetsid libc_setsid //go:linkname procSetuid libc_setuid //go:linkname procshutdown libc_shutdown @@ -391,7 +389,6 @@ var ( procSetpriority, procSetregid, procSetreuid, - procSetrlimit, procSetsid, procSetuid, procshutdown, @@ -646,7 +643,7 @@ func __minor(version int, dev uint64) (val uint) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { +func ioctlRet(fd int, req int, arg uintptr) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { @@ -657,7 +654,7 @@ func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtrRet(fd int, req uint, arg unsafe.Pointer) (ret int, err error) { +func ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { @@ -1650,16 +1647,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 07bfe2ef9..c31681743 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -257,7 +257,7 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) @@ -267,7 +267,7 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index e2a64f099..690cefc3d 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -151,6 +151,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -610,6 +620,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git 
a/prometheus-to-sd/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/prometheus-to-sd/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 34aa77521..5bffc10ea 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -151,6 +151,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -610,6 +620,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git a/prometheus-to-sd/vendor/golang.org/x/sys/windows/types_windows.go b/prometheus-to-sd/vendor/golang.org/x/sys/windows/types_windows.go index 857acf103..0dbb20841 100644 --- a/prometheus-to-sd/vendor/golang.org/x/sys/windows/types_windows.go +++ b/prometheus-to-sd/vendor/golang.org/x/sys/windows/types_windows.go @@ -2229,10 +2229,10 @@ const ( JobObjectExtendedLimitInformation = 9 JobObjectGroupInformation = 11 JobObjectGroupInformationEx = 14 - JobObjectLimitViolationInformation2 = 35 + JobObjectLimitViolationInformation2 = 34 JobObjectNetRateControlInformation = 32 JobObjectNotificationLimitInformation = 12 - JobObjectNotificationLimitInformation2 = 34 + JobObjectNotificationLimitInformation2 = 33 JobObjectSecurityLimitInformation = 5 ) diff --git a/prometheus-to-sd/vendor/google.golang.org/api/googleapi/googleapi.go b/prometheus-to-sd/vendor/google.golang.org/api/googleapi/googleapi.go index d1784f1a3..b328a7976 100644 --- a/prometheus-to-sd/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/prometheus-to-sd/vendor/google.golang.org/api/googleapi/googleapi.go @@ -15,6 +15,7 @@ import ( "net/http" "net/url" "strings" + "time" "google.golang.org/api/internal/third_party/uritemplates" ) @@ -78,6 +79,9 @@ type Error struct { Header http.Header Errors []ErrorItem + // err is typically a wrapped apierror.APIError, see + // google-api-go-client/internal/gensupport/error.go. + err error } // ErrorItem is a detailed error code & message from the Google API frontend. @@ -121,6 +125,15 @@ func (e *Error) Error() string { return buf.String() } +// Wrap allows an existing Error to wrap another error. See also [Error.Unwrap]. +func (e *Error) Wrap(err error) { + e.err = err +} + +func (e *Error) Unwrap() error { + return e.err +} + type errorReply struct { Error *Error `json:"error"` } @@ -140,6 +153,7 @@ func CheckResponse(res *http.Response) error { jerr.Error.Code = res.StatusCode } jerr.Error.Body = string(slurp) + jerr.Error.Header = res.Header return jerr.Error } } @@ -172,8 +186,9 @@ func CheckMediaResponse(res *http.Response) error { } slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) return &Error{ - Code: res.StatusCode, - Body: string(slurp), + Code: res.StatusCode, + Body: string(slurp), + Header: res.Header, } } @@ -245,12 +260,30 @@ func ChunkSize(size int) MediaOption { return chunkSizeOption(size) } +type chunkRetryDeadlineOption time.Duration + +func (cd chunkRetryDeadlineOption) setOptions(o *MediaOptions) { + o.ChunkRetryDeadline = time.Duration(cd) +} + +// ChunkRetryDeadline returns a MediaOption which sets a per-chunk retry +// deadline. If a single chunk has been attempting to upload for longer than +// this time and the request fails, it will no longer be retried, and the error +// will be returned to the caller. 
+// This is only applicable for files which are large enough to require +// a multi-chunk resumable upload. +// The default value is 32s. +// To set a deadline on the entire upload, use context timeout or cancellation. +func ChunkRetryDeadline(deadline time.Duration) MediaOption { + return chunkRetryDeadlineOption(deadline) +} + // MediaOptions stores options for customizing media upload. It is not used by developers directly. type MediaOptions struct { ContentType string ForceEmptyContentType bool - - ChunkSize int + ChunkSize int + ChunkRetryDeadline time.Duration } // ProcessMediaOptions stores options from opts in a MediaOptions. @@ -362,11 +395,11 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool { // For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, // you could request just those fields like this: // -// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// svc.Events.List().Fields("nextPageToken", "items/id").Do() // // or if you were also interested in each Item's "Updated" field, you can combine them like this: // -// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() // // Another way to find field names is through the Google API explorer: // https://developers.google.com/apis-explorer/#p/ @@ -391,6 +424,14 @@ type CallOption interface { Get() (key, value string) } +// A MultiCallOption is an option argument to an API call and can be passed +// anywhere a CallOption is accepted. It additionally supports returning a slice +// of values for a given key. +type MultiCallOption interface { + CallOption + GetMulti() (key string, value []string) +} + // QuotaUser returns a CallOption that will set the quota user for a call. // The quota user can be used by server-side applications to control accounting. // It can be an arbitrary string up to 40 characters, and will override UserIP @@ -417,4 +458,24 @@ type traceTok string func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) } +type queryParameter struct { + key string + values []string +} + +// QueryParameter allows setting the value(s) of an arbitrary key. +func QueryParameter(key string, values ...string) CallOption { + return queryParameter{key: key, values: append([]string{}, values...)} +} + +// Get will never actually be called -- GetMulti will. +func (q queryParameter) Get() (string, string) { + return "", "" +} + +// GetMulti returns the key and the values associated with that key. +func (q queryParameter) GetMulti() (string, []string) { + return q.key, q.values +} + // TODO: Fields too diff --git a/prometheus-to-sd/vendor/google.golang.org/api/googleapi/transport/apikey.go b/prometheus-to-sd/vendor/google.golang.org/api/googleapi/transport/apikey.go index 61720ec2e..f5d826c2a 100644 --- a/prometheus-to-sd/vendor/google.golang.org/api/googleapi/transport/apikey.go +++ b/prometheus-to-sd/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -7,7 +7,7 @@ // // This package is DEPRECATED.
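// Illustrative sketch, not part of the vendored diff: the MediaOption and
// CallOption values added above plug into any generated client. "svc", "call",
// and "f" are hypothetical stand-ins for a generated service, a pending call,
// and an io.Reader; only the googleapi identifiers come from this hunk.
//
//	// Let each resumable-upload chunk retry for up to 90s (the default is 32s).
//	call.Media(f, googleapi.ChunkRetryDeadline(90*time.Second))
//
//	// Attach an arbitrary, possibly repeated, query parameter to a call.
//	res, err := svc.Events.List().Do(googleapi.QueryParameter("label", "a", "b"))
//
//	// googleapi.Error now implements Unwrap, so errors.As finds it in a chain.
//	var gerr *googleapi.Error
//	if errors.As(err, &gerr) {
//		log.Println(gerr.Code, gerr.Header) // Header is now populated by CheckResponse
//	}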
Users should instead use, // -// service, err := NewService(..., option.WithAPIKey(...)) +// service, err := NewService(..., option.WithAPIKey(...)) package transport import ( diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/cert/default_cert.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/cert/default_cert.go new file mode 100644 index 000000000..21d025153 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/cert/default_cert.go @@ -0,0 +1,58 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "errors" + "sync" +) + +// defaultCertData holds all the variables pertaining to +// the default certificate source created by DefaultSource. +// +// A singleton model is used to allow the source to be reused +// by the transport layer. +type defaultCertData struct { + once sync.Once + source Source + err error +} + +var ( + defaultCert defaultCertData +) + +// Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. +type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable. +var errSourceUnavailable = errors.New("certificate source is unavailable") + +// DefaultSource returns a certificate source using the preferred EnterpriseCertificateProxySource. +// If EnterpriseCertificateProxySource is not available, fall back to the legacy SecureConnectSource. +// +// If neither source is available (due to missing configurations), a nil Source and a nil error are +// returned to indicate that a default certificate source is unavailable. +func DefaultSource() (Source, error) { + defaultCert.once.Do(func() { + defaultCert.source, defaultCert.err = NewEnterpriseCertificateProxySource("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.source, defaultCert.err = NewSecureConnectSource("") + if errors.Is(defaultCert.err, errSourceUnavailable) { + defaultCert.source, defaultCert.err = nil, nil + } + } + }) + return defaultCert.source, defaultCert.err +} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/cert/enterprise_cert.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/cert/enterprise_cert.go new file mode 100644 index 000000000..1061b5f05 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/cert/enterprise_cert.go @@ -0,0 +1,54 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers.
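// Illustrative sketch, not part of the vendored diff: a Source is exactly the
// crypto/tls GetClientCertificate callback, so the default source added above
// wires into a TLS client like this. cert here is this internal package;
// applications would normally reach it only through google.golang.org/api/option.
//
//	src, err := cert.DefaultSource() // (nil, nil) when no source is configured
//	if err != nil {
//		return nil, err
//	}
//	cfg := &tls.Config{}
//	if src != nil {
//		cfg.GetClientCertificate = src
//	}
//	hc := &http.Client{Transport: &http.Transport{TLSClientConfig: cfg}}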
Use the + // google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "errors" + + "github.com/googleapis/enterprise-certificate-proxy/client" +) + +type ecpSource struct { + key *client.Key +} + +// NewEnterpriseCertificateProxySource creates a certificate source +// using the Enterprise Certificate Proxy client, which delegates +// certificate-related operations to an OS-specific "signer binary" +// that communicates with the native keystore (e.g. Keychain on macOS). +// +// The configFilePath points to a config file containing relevant parameters +// such as the certificate issuer and the location of the signer binary. +// If configFilePath is empty, the client will attempt to load the config from +// a well-known gcloud location. +func NewEnterpriseCertificateProxySource(configFilePath string) (Source, error) { + key, err := client.Cred(configFilePath) + if err != nil { + if errors.Is(err, client.ErrCredUnavailable) { + return nil, errSourceUnavailable + } + return nil, err + } + + return (&ecpSource{ + key: key, + }).getClientCertificate, nil +} + +func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + var cert tls.Certificate + cert.PrivateKey = s.key + cert.Certificate = s.key.CertificateChain() + return &cert, nil +} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go new file mode 100644 index 000000000..5913cab80 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go @@ -0,0 +1,123 @@ +// Copyright 2022 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "os/user" + "path/filepath" + "sync" + "time" +) + +const ( + metadataPath = ".secureConnect" + metadataFile = "context_aware_metadata.json" +) + +type secureConnectSource struct { + metadata secureConnectMetadata + + // Cache the cert to avoid executing helper command repeatedly. + cachedCertMutex sync.Mutex + cachedCert *tls.Certificate +} + +type secureConnectMetadata struct { + Cmd []string `json:"cert_provider_command"` +} + +// NewSecureConnectSource creates a certificate source using +// the Secure Connect Helper and its associated metadata file. +// +// The configFilePath points to the location of the context-aware metadata file. +// If configFilePath is empty, use the default context-aware metadata location. +func NewSecureConnectSource(configFilePath string) (Source, error) { + if configFilePath == "" { + user, err := user.Current() + if err != nil { + // Error locating the default config means Secure Connect is not supported.
+ return nil, errSourceUnavailable + } + configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) + } + + file, err := ioutil.ReadFile(configFilePath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // Config file missing means Secure Connect is not supported. + return nil, errSourceUnavailable + } + return nil, err + } + + var metadata secureConnectMetadata + if err := json.Unmarshal(file, &metadata); err != nil { + return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err) + } + if err := validateMetadata(metadata); err != nil { + return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err) + } + return (&secureConnectSource{ + metadata: metadata, + }).getClientCertificate, nil +} + +func validateMetadata(metadata secureConnectMetadata) error { + if len(metadata.Cmd) == 0 { + return errors.New("empty cert_provider_command") + } + return nil +} + +func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + s.cachedCertMutex.Lock() + defer s.cachedCertMutex.Unlock() + if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) { + return s.cachedCert, nil + } + // Expand OS environment variables in the cert provider command such as "$HOME". + for i := 0; i < len(s.metadata.Cmd); i++ { + s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i]) + } + command := s.metadata.Cmd + data, err := exec.Command(command[0], command[1:]...).Output() + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(data, data) + if err != nil { + return nil, err + } + s.cachedCert = &cert + return &cert, nil +} + +// isCertificateExpired returns true if the given cert is expired or invalid. +func isCertificateExpired(cert *tls.Certificate) bool { + if len(cert.Certificate) == 0 { + return true + } + parsed, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return true + } + return time.Now().After(parsed.NotAfter) +} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/creds.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/creds.go index 75e9445e1..63c660922 100644 --- a/prometheus-to-sd/vendor/google.golang.org/api/internal/creds.go +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/creds.go @@ -6,40 +6,63 @@ package internal import ( "context" + "crypto/tls" "encoding/json" + "errors" "fmt" "io/ioutil" + "net" + "net/http" + "os" + "time" "golang.org/x/oauth2" + "google.golang.org/api/internal/impersonate" "golang.org/x/oauth2/google" ) +const quotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" + // Creds returns credential information obtained from DialSettings, or if none, then // it returns default credential information. 
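// Illustrative sketch, not part of the vendored diff: the rewritten Creds
// below resolves credentials in a fixed order (InternalCredentials,
// Credentials, CredentialsJSON, CredentialsFile, TokenSource, then Application
// Default Credentials) and layers impersonation on top when configured. A
// caller inside this module, with a hypothetical *DialSettings ds, would do:
//
//	creds, err := Creds(ctx, ds)
//	if err != nil {
//		return nil, err
//	}
//	ts := creds.TokenSource // an oauth2.TokenSource for the authenticated transport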
func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { + creds, err := baseCreds(ctx, ds) + if err != nil { + return nil, err + } + if ds.ImpersonationConfig != nil { + return impersonateCredentials(ctx, creds, ds) + } + return creds, nil +} + +func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { + if ds.InternalCredentials != nil { + return ds.InternalCredentials, nil + } if ds.Credentials != nil { return ds.Credentials, nil } if ds.CredentialsJSON != nil { - return credentialsFromJSON(ctx, ds.CredentialsJSON, ds.Endpoint, ds.Scopes, ds.Audiences) + return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) } if ds.CredentialsFile != "" { data, err := ioutil.ReadFile(ds.CredentialsFile) if err != nil { return nil, fmt.Errorf("cannot read credentials file: %v", err) } - return credentialsFromJSON(ctx, data, ds.Endpoint, ds.Scopes, ds.Audiences) + return credentialsFromJSON(ctx, data, ds) } if ds.TokenSource != nil { return &google.Credentials{TokenSource: ds.TokenSource}, nil } - cred, err := google.FindDefaultCredentials(ctx, ds.Scopes...) + cred, err := google.FindDefaultCredentials(ctx, ds.GetScopes()...) if err != nil { return nil, err } if len(cred.JSON) > 0 { - return credentialsFromJSON(ctx, cred.JSON, ds.Endpoint, ds.Scopes, ds.Audiences) + return credentialsFromJSON(ctx, cred.JSON, ds) } // For GAE and GCE, the JSON is empty so return the default credentials directly. return cred, nil @@ -50,56 +73,151 @@ const ( serviceAccountKey = "service_account" ) -// credentialsFromJSON returns a google.Credentials based on the input. +// credentialsFromJSON returns a google.Credentials from the JSON data. +// +// - A self-signed JWT flow will be executed if the following conditions are +// met: +// +// (1) At least one of the following is true: +// (a) No scope is provided +// (b) Scope for self-signed JWT flow is enabled +// (c) Audiences are explicitly provided by users +// (2) No service account impersonation // -// - If the JSON is a service account and no scopes provided, returns self-signed JWT auth flow -// - Otherwise, returns OAuth 2.0 flow. -func credentialsFromJSON(ctx context.Context, data []byte, endpoint string, scopes []string, audiences []string) (*google.Credentials, error) { - cred, err := google.CredentialsFromJSON(ctx, data, scopes...) +// - Otherwise, executes standard OAuth 2.0 flow +// More details: google.aip.dev/auth/4111 +func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*google.Credentials, error) { + var params google.CredentialsParams + params.Scopes = ds.GetScopes() + + // Determine configurations for the OAuth2 transport, which is separate from the API transport. + // The OAuth2 transport and endpoint will be configured for mTLS if applicable. + clientCertSource, oauth2Endpoint, err := GetClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) if err != nil { return nil, err } - if len(data) > 0 && len(scopes) == 0 { - var f struct { - Type string `json:"type"` - // The rest JSON fields are omitted because they are not used.
+ params.TokenURL = oauth2Endpoint + if clientCertSource != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: clientCertSource, } - if err := json.Unmarshal(cred.JSON, &f); err != nil { + ctx = context.WithValue(ctx, oauth2.HTTPClient, customHTTPClient(tlsConfig)) + } + + // By default, a standard OAuth 2.0 token source is created + cred, err := google.CredentialsFromJSONWithParams(ctx, data, params) + if err != nil { + return nil, err + } + + // Override the token source to use self-signed JWT if conditions are met + isJWTFlow, err := isSelfSignedJWTFlow(data, ds) + if err != nil { + return nil, err + } + if isJWTFlow { + ts, err := selfSignedJWTTokenSource(data, ds) + if err != nil { return nil, err } - if f.Type == serviceAccountKey { - ts, err := selfSignedJWTTokenSource(data, endpoint, audiences) - if err != nil { - return nil, err - } - cred.TokenSource = ts - } + cred.TokenSource = ts } + return cred, err } -func selfSignedJWTTokenSource(data []byte, endpoint string, audiences []string) (oauth2.TokenSource, error) { - // Use the API endpoint as the default audience - audience := endpoint - if len(audiences) > 0 { - // TODO(shinfan): Update golang oauth to support multiple audiences. - if len(audiences) > 1 { - return nil, fmt.Errorf("multiple audiences support is not implemented") +func isSelfSignedJWTFlow(data []byte, ds *DialSettings) (bool, error) { + if (ds.EnableJwtWithScope || ds.HasCustomAudience()) && + ds.ImpersonationConfig == nil { + // Check if JSON is a service account and if so create a self-signed JWT. + var f struct { + Type string `json:"type"` + // The remaining JSON fields are omitted because they are not used. + } + if err := json.Unmarshal(data, &f); err != nil { + return false, err } - audience = audiences[0] + return f.Type == serviceAccountKey, nil } - return google.JWTAccessTokenSourceFromJSON(data, audience) + return false, nil } -// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials. -// -// NOTE(cbro): consider promoting this to a field on google.Credentials. -func QuotaProjectFromCreds(cred *google.Credentials) string { +func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource, error) { + if len(ds.GetScopes()) > 0 && !ds.HasCustomAudience() { + // Scopes are preferred in self-signed JWT unless the scope is not available + // or a custom audience is used. + return google.JWTAccessTokenSourceWithScope(data, ds.GetScopes()...) + } else if ds.GetAudience() != "" { + // Fall back to the audience if no scope is provided + return google.JWTAccessTokenSourceFromJSON(data, ds.GetAudience()) + } else { + return nil, errors.New("neither scopes nor audience are available for the self-signed JWT") + } +} + +// GetQuotaProject retrieves the quota project, with precedence: client option, +// environment variable, credentials file.
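+// For illustration only (an editor's sketch; the project IDs are placeholders):
+// with GOOGLE_CLOUD_QUOTA_PROJECT=env-proj set and credentials JSON containing
+// {"quota_project_id": "json-proj"},
+//
+//	GetQuotaProject(creds, "opt-proj") // "opt-proj": the client option wins
+//	GetQuotaProject(creds, "")         // "env-proj": the env variable beats the file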
+func GetQuotaProject(creds *google.Credentials, clientOpt string) string { + if clientOpt != "" { + return clientOpt + } + if env := os.Getenv(quotaProjectEnvVar); env != "" { + return env + } + if creds == nil { + return "" + } var v struct { QuotaProject string `json:"quota_project_id"` } - if err := json.Unmarshal(cred.JSON, &v); err != nil { + if err := json.Unmarshal(creds.JSON, &v); err != nil { return "" } return v.QuotaProject } + +func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds *DialSettings) (*google.Credentials, error) { + if len(ds.ImpersonationConfig.Scopes) == 0 { + ds.ImpersonationConfig.Scopes = ds.GetScopes() + } + ts, err := impersonate.TokenSource(ctx, creds.TokenSource, ds.ImpersonationConfig) + if err != nil { + return nil, err + } + return &google.Credentials{ + TokenSource: ts, + ProjectID: creds.ProjectID, + }, nil +} + +// oauth2DialSettings returns the settings to be used by the OAuth2 transport, which is separate from the API transport. +func oauth2DialSettings(ds *DialSettings) *DialSettings { + var ods DialSettings + ods.DefaultEndpoint = google.Endpoint.TokenURL + ods.DefaultMTLSEndpoint = google.MTLSTokenURL + ods.ClientCertSource = ds.ClientCertSource + return &ods +} + +// customHTTPClient constructs an HTTPClient using the provided tlsConfig, to support mTLS. +func customHTTPClient(tlsConfig *tls.Config) *http.Client { + trans := baseTransport() + trans.TLSClientConfig = tlsConfig + return &http.Client{Transport: trans} +} + +func baseTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/dca.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/dca.go new file mode 100644 index 000000000..204a3fd2f --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/dca.go @@ -0,0 +1,144 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dca contains utils for implementing Device Certificate +// Authentication according to https://google.aip.dev/auth/4114 +// +// The overall logic for DCA is as follows: +// 1. If both endpoint override and client certificate are specified, use them as is. +// 2. If the user does not specify a client certificate, we will attempt to use a default +// client certificate. +// 3. If the user does not specify an endpoint override, we will use defaultMtlsEndpoint if +// a client certificate is available and defaultEndpoint otherwise. +// +// Implications of the above logic: +// 1. If the user specifies a non-mTLS endpoint override but a client certificate is +// available, we will pass along the cert anyway and let the server decide what to do. +// 2. If the user specifies an mTLS endpoint override but a client certificate is not +// available, we will not fail fast, but let the backend return an error when connecting. +// +// We would like to avoid introducing client-side logic that parses whether the +// endpoint override is an mTLS URL, since the URL pattern may change at any time. +// +// This package is not intended for use by end developers.
Use the +// google.golang.org/api/option package to configure API clients. + +// Package internal supports the options and transport packages. +package internal + +import ( + "net/url" + "os" + "strings" + + "google.golang.org/api/internal/cert" +) + +const ( + mTLSModeAlways = "always" + mTLSModeNever = "never" + mTLSModeAuto = "auto" +) + +// GetClientCertificateSourceAndEndpoint is a convenience function that invokes +// getClientCertificateSource and getEndpoint sequentially and returns the client +// cert source and endpoint as a tuple. +func GetClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return nil, "", err + } + endpoint, err := getEndpoint(settings, clientCertSource) + if err != nil { + return nil, "", err + } + return clientCertSource, endpoint, nil +} + +// getClientCertificateSource returns a default client certificate source, if +// not provided by the user. +// +// A nil default source can be returned if the source does not exist. Any exceptions +// encountered while initializing the default source will be reported as a client +// error (ex. corrupt metadata file). +// +// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE +// must be set to "true" to allow certificates to be used (including user-provided +// certificates). For details, see AIP-4114. +func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { + if !isClientCertificateEnabled() { + return nil, nil + } else if settings.ClientCertSource != nil { + return settings.ClientCertSource, nil + } else { + return cert.DefaultSource() + } +} + +func isClientCertificateEnabled() bool { + useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") + // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. + return strings.ToLower(useClientCert) == "true" +} + +// getEndpoint returns the endpoint for the service, taking into account the +// user-provided endpoint override "settings.Endpoint". +// +// If no endpoint override is specified, we will either return the default endpoint or +// the default mTLS endpoint if a client certificate is available. +// +// You can override the default endpoint choice (mTLS vs. regular) by setting the +// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// +// If the endpoint override is an address (host:port) rather than a full base +// URL (ex. https://...), then the user-provided address will be merged into +// the default endpoint. For example, WithEndpoint("myhost:8000") and +// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8000/bar/baz" +func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { + if settings.Endpoint == "" { + mtlsMode := getMTLSMode() + if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { + return settings.DefaultMTLSEndpoint, nil + } + return settings.DefaultEndpoint, nil + } + if strings.Contains(settings.Endpoint, "://") { + // User passed in a full URL path, use it verbatim. + return settings.Endpoint, nil + } + if settings.DefaultEndpoint == "" { + // If DefaultEndpoint is not configured, use the user-provided endpoint verbatim. + // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. + return settings.Endpoint, nil + } + + // Assume the user-provided endpoint is host[:port], merge it with the default endpoint.
+ return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) +} + +func getMTLSMode() string { + mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") + if mode == "" { + mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. + } + if mode == "" { + return mTLSModeAuto + } + return strings.ToLower(mode) +} + +func mergeEndpoints(baseURL, newHost string) (string, error) { + u, err := url.Parse(fixScheme(baseURL)) + if err != nil { + return "", err + } + return strings.Replace(baseURL, u.Host, newHost, 1), nil +} + +func fixScheme(baseURL string) string { + if !strings.Contains(baseURL, "://") { + return "https://" + baseURL + } + return baseURL +} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/error.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/error.go new file mode 100644 index 000000000..886c6532b --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/error.go @@ -0,0 +1,24 @@ +// Copyright 2022 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "errors" + + "github.com/googleapis/gax-go/v2/apierror" + "google.golang.org/api/googleapi" +) + +// WrapError parses err into an [apierror.APIError] and, if err is or wraps a +// [googleapi.Error], attaches the APIError to it; it then returns err. If err +// is not a [googleapi.Error] (or a [google.golang.org/grpc/status.Status]), +// err is returned without modification. +func WrapError(err error) error { + var herr *googleapi.Error + apiError, ok := apierror.ParseError(err, false) + if ok && errors.As(err, &herr) { + herr.Wrap(apiError) + } + return err +} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/json.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/json.go index c01e32189..eab49a11e 100644 --- a/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/json.go +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/json.go @@ -13,9 +13,10 @@ import ( // MarshalJSON returns a JSON encoding of schema containing only selected fields. // A field is selected if any of the following is true: -// * it has a non-empty value -// * its field name is present in forceSendFields and it is not a nil pointer or nil interface -// * its field name is present in nullFields. +// - it has a non-empty value +// - its field name is present in forceSendFields and it is not a nil pointer or nil interface +// - its field name is present in nullFields. +// // The JSON key for each selected field is taken from the field's json: struct tag. func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) { if len(forceSendFields) == 0 && len(nullFields) == 0 { @@ -85,7 +86,12 @@ func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNu if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil { ms, ok := v.Interface().(map[string]string) if !ok { - return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name) + mi, err := initMapSlow(v, f.Name, useNullMaps) + if err != nil { + return nil, err + } + m[tag.apiName] = mi + continue } mi := map[string]interface{}{} for k, v := range ms { @@ -119,6 +125,25 @@ func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNu return m, nil } +// initMapSlow uses reflection to build up a map object. This is slower than +// the default behavior so it should be used only as a fallback.
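+// For illustration only (an editor's sketch of the fallback in action):
+//
+//	v := reflect.ValueOf(map[string]int{"a": 1})
+//	mi, _ := initMapSlow(v, "F", map[string]map[string]bool{"F": {"b": true}})
+//	// mi == map[string]interface{}{"a": 1, "b": nil}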
+func initMapSlow(rv reflect.Value, fieldName string, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) { + mi := map[string]interface{}{} + iter := rv.MapRange() + for iter.Next() { + k, ok := iter.Key().Interface().(string) + if !ok { + return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]any", fieldName) + } + v := iter.Value().Interface() + mi[k] = v + } + for k := range useNullMaps[fieldName] { + mi[k] = nil + } + return mi, nil +} + // formatAsString returns a string representation of v, dereferencing it first if possible. func formatAsString(v reflect.Value, kind reflect.Kind) string { if kind == reflect.Ptr && !v.IsNil() { diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/media.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/media.go index 0288cc304..8356e7f27 100644 --- a/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/media.go @@ -15,93 +15,12 @@ import ( "net/textproto" "strings" "sync" + "time" + gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/googleapi" ) -const sniffBuffSize = 512 - -func newContentSniffer(r io.Reader) *contentSniffer { - return &contentSniffer{r: r} -} - -// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. -type contentSniffer struct { - r io.Reader - start []byte // buffer for the sniffed bytes. - err error // set to any error encountered while reading bytes to be sniffed. - - ctype string // set on first sniff. - sniffed bool // set to true on first sniff. -} - -func (cs *contentSniffer) Read(p []byte) (n int, err error) { - // Ensure that the content type is sniffed before any data is consumed from Reader. - _, _ = cs.ContentType() - - if len(cs.start) > 0 { - n := copy(p, cs.start) - cs.start = cs.start[n:] - return n, nil - } - - // We may have read some bytes into start while sniffing, even if the read ended in an error. - // We should first return those bytes, then the error. - if cs.err != nil { - return 0, cs.err - } - - // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. - return cs.r.Read(p) -} - -// ContentType returns the sniffed content type, and whether the content type was succesfully sniffed. -func (cs *contentSniffer) ContentType() (string, bool) { - if cs.sniffed { - return cs.ctype, cs.ctype != "" - } - cs.sniffed = true - // If ReadAll hits EOF, it returns err==nil. - cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) - - // Don't try to detect the content type based on possibly incomplete data. - if cs.err != nil { - return "", false - } - - cs.ctype = http.DetectContentType(cs.start) - return cs.ctype, true -} - -// DetermineContentType determines the content type of the supplied reader. -// If the content type is already known, it can be specified via ctype. -// Otherwise, the content of media will be sniffed to determine the content type. -// If media implements googleapi.ContentTyper (deprecated), this will be used -// instead of sniffing the content. -// After calling DetectContentType the caller must not perform further reads on -// media, but rather read from the Reader that is returned. 
-func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { - // Note: callers could avoid calling DetectContentType if ctype != "", - // but doing the check inside this function reduces the amount of - // generated code. - if ctype != "" { - return media, ctype - } - - // For backwards compatability, allow clients to set content - // type by providing a ContentTyper for media. - if typer, ok := media.(googleapi.ContentTyper); ok { - return media, typer.ContentType() - } - - sniffer := newContentSniffer(media) - if ctype, ok := sniffer.ContentType(); ok { - return sniffer, ctype - } - // If content type could not be sniffed, reads from sniffer will eventually fail with an error. - return sniffer, "" -} - type typeReader struct { io.Reader typ string @@ -217,12 +136,13 @@ func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer // code only. type MediaInfo struct { // At most one of Media and MediaBuffer will be set. - media io.Reader - buffer *MediaBuffer - singleChunk bool - mType string - size int64 // mediaSize, if known. Used only for calls to progressUpdater_. - progressUpdater googleapi.ProgressUpdater + media io.Reader + buffer *MediaBuffer + singleChunk bool + mType string + size int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater googleapi.ProgressUpdater + chunkRetryDeadline time.Duration } // NewInfoFromMedia should be invoked from the Media method of a call. It returns a @@ -232,8 +152,12 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { mi := &MediaInfo{} opts := googleapi.ProcessMediaOptions(options) if !opts.ForceEmptyContentType { - r, mi.mType = DetermineContentType(r, opts.ContentType) + mi.mType = opts.ContentType + if mi.mType == "" { + r, mi.mType = gax.DetermineContentType(r) + } } + mi.chunkRetryDeadline = opts.ChunkRetryDeadline mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) return mi } @@ -242,7 +166,11 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { // call. It returns a MediaInfo using the given reader, size and media type. func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo { rdr := ReaderAtToReader(r, size) - rdr, mType := DetermineContentType(rdr, mediaType) + mType := mediaType + if mType == "" { + rdr, mType = gax.DetermineContentType(rdr) + } + return &MediaInfo{ size: size, mType: mType, @@ -286,13 +214,12 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB // be retried because the data is stored in the MediaBuffer. media, _, _, _ = mi.buffer.Chunk() } + toCleanup := []io.Closer{} if media != nil { fb := readerFunc(body) fm := readerFunc(media) combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) - toCleanup := []io.Closer{ - combined, - } + toCleanup = append(toCleanup, combined) if fb != nil && fm != nil { getBody = func() (io.ReadCloser, error) { rb := ioutil.NopCloser(fb()) @@ -306,18 +233,30 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB return r, nil } } - cleanup = func() { - for _, closer := range toCleanup { - _ = closer.Close() - } - - } reqHeaders.Set("Content-Type", ctype) body = combined } if mi.buffer != nil && mi.mType != "" && !mi.singleChunk { + // This happens when initiating a resumable upload session. + // The initial request contains a JSON body rather than media. 
+ // It can be retried with a getBody function that re-creates the request body. + fb := readerFunc(body) + if fb != nil { + getBody = func() (io.ReadCloser, error) { + rb := ioutil.NopCloser(fb()) + toCleanup = append(toCleanup, rb) + return rb, nil + } + } reqHeaders.Set("X-Upload-Content-Type", mi.mType) } + // Ensure that any bodies created in getBody are cleaned up. + cleanup = func() { + for _, closer := range toCleanup { + _ = closer.Close() + } + + } return body, getBody, cleanup } @@ -356,6 +295,7 @@ func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload { mi.progressUpdater(curr, mi.size) } }, + ChunkRetryDeadline: mi.chunkRetryDeadline, } } diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/params.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/params.go index 0e878a425..1a30d2ca2 100644 --- a/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/params.go +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/params.go @@ -37,15 +37,21 @@ func (u URLParams) SetMulti(key string, values []string) { u[key] = values } -// Encode encodes the values into ``URL encoded'' form +// Encode encodes the values into “URL encoded” form // ("bar=baz&foo=quux") sorted by key. func (u URLParams) Encode() string { return url.Values(u).Encode() } -// SetOptions sets the URL params and any additional call options. +// SetOptions sets the URL params and any additional `CallOption` or +// `MultiCallOption` passed in. func SetOptions(u URLParams, opts ...googleapi.CallOption) { for _, o := range opts { + m, ok := o.(googleapi.MultiCallOption) + if ok { + u.SetMulti(m.GetMulti()) + continue + } u.Set(o.Get()) } } diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/resumable.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/resumable.go index edc87ec24..f168ea6d2 100644 --- a/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -10,34 +10,12 @@ import ( "fmt" "io" "net/http" + "strings" "sync" "time" - gax "github.com/googleapis/gax-go/v2" -) - -// Backoff is an interface around gax.Backoff's Pause method, allowing tests to provide their -// own implementation. -type Backoff interface { - Pause() time.Duration -} - -// These are declared as global variables so that tests can overwrite them. -var ( - retryDeadline = 32 * time.Second - backoff = func() Backoff { - return &gax.Backoff{Initial: 100 * time.Millisecond} - } - // isRetryable is a platform-specific hook, specified in retryable_linux.go - syscallRetryable func(error) bool = func(err error) bool { return false } -) - -const ( - // statusTooManyRequests is returned by the storage API if the - // per-project limits have been temporarily exceeded. The request - // should be retried. - // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes - statusTooManyRequests = 429 + "github.com/google/uuid" + "google.golang.org/api/internal" ) // ResumableUpload is used by the generated APIs to provide resumable uploads. @@ -57,6 +35,18 @@ type ResumableUpload struct { // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded. Callback func(int64) + + // Retry optionally configures retries for requests made against the upload. 
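+	// For illustration only (an editor's sketch using RetryConfig from this
+	// package and gax.Backoff from gax-go; the values are arbitrary):
+	//
+	//	rx.Retry = &RetryConfig{
+	//		Backoff:     &gax.Backoff{Initial: time.Second, Max: 30 * time.Second, Multiplier: 2},
+	//		ShouldRetry: func(err error) bool { return errors.Is(err, io.ErrUnexpectedEOF) },
+	//	}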
+ Retry *RetryConfig + + // ChunkRetryDeadline configures the per-chunk deadline after which no further + // retries should happen. + ChunkRetryDeadline time.Duration + + // Track current request invocation ID and attempt count for retry metric + // headers. + invocationID string + attempts int } // Progress returns the number of bytes uploaded at this point. @@ -91,6 +81,10 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, req.Header.Set("Content-Type", rx.MediaType) req.Header.Set("User-Agent", rx.UserAgent) + baseXGoogHeader := "gl-go/" + GoVersion() + " gdcl/" + internal.Version + invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", rx.invocationID, rx.attempts) + req.Header.Set("X-Goog-Api-Client", strings.Join([]string{baseXGoogHeader, invocationHeader}, " ")) + // Google's upload endpoint uses status code 308 for a // different purpose than the "308 Permanent Redirect" // since-standardized in RFC 7238. Because of the conflict in @@ -174,29 +168,69 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err } return nil, err } + // This case is very unlikely but possible only if rx.ChunkRetryDeadline is + // set to a very small value, in which case no requests will be sent before + // the deadline. Return an error to avoid causing a panic. + if resp == nil { + return nil, fmt.Errorf("upload request to %v not sent, choose a larger value for ChunkRetryDeadline", rx.URI) + } return resp, nil } + // Configure retryable error criteria. + errorFunc := rx.Retry.errorFunc() + + // Configure per-chunk retry deadline. + var retryDeadline time.Duration + if rx.ChunkRetryDeadline != 0 { + retryDeadline = rx.ChunkRetryDeadline + } else { + retryDeadline = defaultRetryDeadline + } // Send all chunks. for { var pause time.Duration - // Each chunk gets its own initialized-at-zero retry. - bo := backoff() - quitAfter := time.After(retryDeadline) + // Each chunk gets its own initialized-at-zero backoff and invocation ID. + bo := rx.Retry.backoff() + quitAfterTimer := time.NewTimer(retryDeadline) + rx.attempts = 1 + rx.invocationID = uuid.New().String() // Retry loop for a single chunk. for { + pauseTimer := time.NewTimer(pause) select { case <-ctx.Done(): + quitAfterTimer.Stop() + pauseTimer.Stop() if err == nil { err = ctx.Err() } return prepareReturn(resp, err) - case <-time.After(pause): - case <-quitAfter: + case <-pauseTimer.C: + case <-quitAfterTimer.C: + pauseTimer.Stop() return prepareReturn(resp, err) } + pauseTimer.Stop() + + // Check for context cancellation or timeout once more. If more than one + // case in the select statement above was satisfied at the same time, Go + // will choose one arbitrarily. + // That can cause an operation to go through even if the context was + // canceled before or the timeout was reached. + select { + case <-ctx.Done(): + quitAfterTimer.Stop() + if err == nil { + err = ctx.Err() + } + return prepareReturn(resp, err) + case <-quitAfterTimer.C: + return prepareReturn(resp, err) + default: + } resp, err = rx.transferChunk(ctx) @@ -206,10 +240,12 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err } // Check if we should retry the request.
- if !shouldRetry(status, err) { + if !errorFunc(status, err) { + quitAfterTimer.Stop() break } + rx.attempts++ pause = bo.Pause() if resp != nil && resp.Body != nil { resp.Body.Close() @@ -226,33 +262,3 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err return prepareReturn(resp, err) } } - -// shouldRetry indicates whether an error is retryable for the purposes of this -// package, following guidance from -// https://cloud.google.com/storage/docs/exponential-backoff . -func shouldRetry(status int, err error) bool { - if 500 <= status && status <= 599 { - return true - } - if status == statusTooManyRequests { - return true - } - if err == io.ErrUnexpectedEOF { - return true - } - // Transient network errors should be retried. - if syscallRetryable(err) { - return true - } - if err, ok := err.(interface{ Temporary() bool }); ok { - if err.Temporary() { - return true - } - } - // If Go 1.13 error unwrapping is available, use this to examine wrapped - // errors. - if err, ok := err.(interface{ Unwrap() error }); ok { - return shouldRetry(status, err.Unwrap()) - } - return false -} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/retry.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/retry.go new file mode 100644 index 000000000..20b57d925 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/retry.go @@ -0,0 +1,121 @@ +// Copyright 2021 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "errors" + "io" + "net" + "strings" + "time" + + "github.com/googleapis/gax-go/v2" + "google.golang.org/api/googleapi" +) + +// Backoff is an interface around gax.Backoff's Pause method, allowing tests to provide their +// own implementation. +type Backoff interface { + Pause() time.Duration +} + +// These are declared as global variables so that tests can overwrite them. +var ( + // Default per-chunk deadline for resumable uploads. + defaultRetryDeadline = 32 * time.Second + // Default backoff timer. + backoff = func() Backoff { + return &gax.Backoff{Initial: 100 * time.Millisecond} + } + // syscallRetryable is a platform-specific hook, specified in retryable_linux.go + syscallRetryable func(error) bool = func(err error) bool { return false } +) + +const ( + // statusTooManyRequests is returned by the storage API if the + // per-project limits have been temporarily exceeded. The request + // should be retried. + // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes + statusTooManyRequests = 429 + + // statusRequestTimeout is returned by the storage API if the + // upload connection was broken. The request should be retried. + statusRequestTimeout = 408 +) + +// shouldRetry indicates whether an error is retryable for the purposes of this +// package, unless a ShouldRetry func is specified by the RetryConfig instead. +// It follows guidance from +// https://cloud.google.com/storage/docs/exponential-backoff . +func shouldRetry(status int, err error) bool { + if 500 <= status && status <= 599 { + return true + } + if status == statusTooManyRequests || status == statusRequestTimeout { + return true + } + if err == io.ErrUnexpectedEOF { + return true + } + // Transient network errors should be retried. 
+ if syscallRetryable(err) { + return true + } + if err, ok := err.(interface{ Temporary() bool }); ok { + if err.Temporary() { + return true + } + } + var opErr *net.OpError + if errors.As(err, &opErr) { + if strings.Contains(opErr.Error(), "use of closed network connection") { + // TODO: check against net.ErrClosed (go 1.16+) instead of string + return true + } + } + + // If Go 1.13 error unwrapping is available, use this to examine wrapped + // errors. + if err, ok := err.(interface{ Unwrap() error }); ok { + return shouldRetry(status, err.Unwrap()) + } + return false +} + +// RetryConfig allows configuration of backoff timing and retryable errors. +type RetryConfig struct { + Backoff *gax.Backoff + ShouldRetry func(err error) bool +} + +// Get a new backoff object based on the configured values. +func (r *RetryConfig) backoff() Backoff { + if r == nil || r.Backoff == nil { + return backoff() + } + return &gax.Backoff{ + Initial: r.Backoff.Initial, + Max: r.Backoff.Max, + Multiplier: r.Backoff.Multiplier, + } +} + +// This is kind of hacky; it is necessary because ShouldRetry expects to +// handle HTTP errors via googleapi.Error, but the error has not yet been +// wrapped with a googleapi.Error at this layer, and the ErrorFunc type +// in the manual layer does not pass in a status explicitly as it does +// here. So, we must wrap error status codes in a googleapi.Error so that +// ShouldRetry can parse this correctly. +func (r *RetryConfig) errorFunc() func(status int, err error) bool { + if r == nil || r.ShouldRetry == nil { + return shouldRetry + } + return func(status int, err error) bool { + if status >= 400 { + return r.ShouldRetry(&googleapi.Error{Code: status}) + } + return r.ShouldRetry(err) + } +} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go index fed998b5d..a916c3da2 100644 --- a/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux // +build linux package gensupport diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/send.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/send.go index 3338c8d19..85c7bcbfd 100644 --- a/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/gensupport/send.go @@ -8,25 +8,34 @@ import ( "context" "encoding/json" "errors" + "fmt" "net/http" + "strings" "time" + + "github.com/google/uuid" + "github.com/googleapis/gax-go/v2" ) -// Hook is the type of a function that is called once before each HTTP request -// that is sent by a generated API. It returns a function that is called after -// the request returns. -// Hooks are not called if the context is nil. -type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response) - -var hooks []Hook - -// RegisterHook registers a Hook to be called before each HTTP request by a -// generated API. Hooks are called in the order they are registered. Each -// hook can return a function; if it is non-nil, it is called after the HTTP -// request returns. These functions are called in the reverse order. -// RegisterHook should not be called concurrently with itself or SendRequest. 
-func RegisterHook(h Hook) { - hooks = append(hooks, h) +// Use this error type to return an error which allows introspection of both +// the context error and the error from the service. +type wrappedCallErr struct { + ctxErr error + wrappedErr error +} + +func (e wrappedCallErr) Error() string { + return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr) +} + +func (e wrappedCallErr) Unwrap() error { + return e.wrappedErr +} + +// Is allows errors.Is to match the error from the call as well as context +// sentinel errors. +func (e wrappedCallErr) Is(target error) bool { + return errors.Is(e.ctxErr, target) || errors.Is(e.wrappedErr, target) } // SendRequest sends a single HTTP request using the given client. @@ -42,23 +51,7 @@ func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (* if ctx == nil { return client.Do(req) } - // Call hooks in order of registration, store returned funcs. - post := make([]func(resp *http.Response), len(hooks)) - for i, h := range hooks { - fn := h(ctx, req) - post[i] = fn - } - - // Send request. - resp, err := send(ctx, client, req) - - // Call returned funcs in reverse order. - for i := len(post) - 1; i >= 0; i-- { - if fn := post[i]; fn != nil { - fn(resp) - } - } - return resp, err + return send(ctx, client, req) } func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { @@ -83,7 +76,7 @@ func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Re // If ctx is non-nil, it calls all hooks, then sends the request with // req.WithContext, then calls any functions returned by the hooks in // reverse order. -func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { +func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request, retry *RetryConfig) (*http.Response, error) { // Disallow Accept-Encoding because it interferes with the automatic gzip handling // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. if _, ok := req.Header["Accept-Encoding"]; ok { @@ -92,48 +85,62 @@ func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Re if ctx == nil { return client.Do(req) } - // Call hooks in order of registration, store returned funcs. - post := make([]func(resp *http.Response), len(hooks)) - for i, h := range hooks { - fn := h(ctx, req) - post[i] = fn - } - - // Send request with retry. - resp, err := sendAndRetry(ctx, client, req) - - // Call returned funcs in reverse order. - for i := len(post) - 1; i >= 0; i-- { - if fn := post[i]; fn != nil { - fn(resp) - } - } - return resp, err + return sendAndRetry(ctx, client, req, retry) } -func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { +func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, retry *RetryConfig) (*http.Response, error) { if client == nil { client = http.DefaultClient } var resp *http.Response var err error + attempts := 1 + invocationID := uuid.New().String() + baseXGoogHeader := req.Header.Get("X-Goog-Api-Client") // Loop to retry the request, up to the context deadline. 
var pause time.Duration - bo := backoff() + var bo Backoff + if retry != nil && retry.Backoff != nil { + bo = &gax.Backoff{ + Initial: retry.Backoff.Initial, + Max: retry.Backoff.Max, + Multiplier: retry.Backoff.Multiplier, + } + } else { + bo = backoff() + } + + var errorFunc = retry.errorFunc() for { + t := time.NewTimer(pause) select { case <-ctx.Done(): - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err == nil { - err = ctx.Err() + t.Stop() + // If we got an error and the context has been canceled, return an error acknowledging + // both the context cancelation and the service error. + if err != nil { + return resp, wrappedCallErr{ctx.Err(), err} + } + return resp, ctx.Err() + case <-t.C: + } + + if ctx.Err() != nil { + // Check for context cancellation once more. If more than one case in a + // select is satisfied at the same time, Go will choose one arbitrarily. + // That can cause an operation to go through even if the context was + // canceled before. + if err != nil { + return resp, wrappedCallErr{ctx.Err(), err} } - return resp, err - case <-time.After(pause): + return resp, ctx.Err() } + invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", invocationID, attempts) + xGoogHeader := strings.Join([]string{invocationHeader, baseXGoogHeader}, " ") + req.Header.Set("X-Goog-Api-Client", xGoogHeader) resp, err = client.Do(req.WithContext(ctx)) @@ -145,9 +152,10 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request) ( // Check if we can retry the request. A retry can only be done if the error // is retryable and the request body can be re-created using GetBody (this // will not be possible if the body was unbuffered). - if req.GetBody == nil || !shouldRetry(status, err) { + if req.GetBody == nil || !errorFunc(status, err) { break } + attempts++ var errBody error req.Body, errBody = req.GetBody() if errBody != nil { diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/impersonate/impersonate.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/impersonate/impersonate.go new file mode 100644 index 000000000..b465bbcd1 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/impersonate/impersonate.go @@ -0,0 +1,128 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package impersonate is used to impersonate Google Credentials. +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "golang.org/x/oauth2" +) + +// Config for generating impersonated credentials. +type Config struct { + // Target is the service account to impersonate. Required. + Target string + // Scopes the impersonated credential should have. Required. + Scopes []string + // Delegates are the service accounts in a delegation chain. Each service + // account must be granted roles/iam.serviceAccountTokenCreator on the next + // service account in the chain. Optional. + Delegates []string +} + +// TokenSource returns an impersonated TokenSource configured with the provided +// config using ts as the base credential provider for making requests. 
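+// For illustration only (an editor's sketch; the account address is a
+// placeholder), impersonating a target service account from a base token
+// source baseTS:
+//
+//	its, err := TokenSource(ctx, baseTS, &Config{
+//		Target: "target-sa@my-project.iam.gserviceaccount.com",
+//		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
+//	})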
+func TokenSource(ctx context.Context, ts oauth2.TokenSource, config *Config) (oauth2.TokenSource, error) { + if len(config.Scopes) == 0 { + return nil, fmt.Errorf("impersonate: scopes must be provided") + } + its := impersonatedTokenSource{ + ctx: ctx, + ts: ts, + name: formatIAMServiceAccountName(config.Target), + // Default to the longest acceptable value of one hour as the token will + // be refreshed automatically. + lifetime: "3600s", + } + + its.delegates = make([]string, len(config.Delegates)) + for i, v := range config.Delegates { + its.delegates[i] = formatIAMServiceAccountName(v) + } + its.scopes = make([]string, len(config.Scopes)) + copy(its.scopes, config.Scopes) + + return oauth2.ReuseTokenSource(nil, its), nil +} + +func formatIAMServiceAccountName(name string) string { + return fmt.Sprintf("projects/-/serviceAccounts/%s", name) +} + +type generateAccessTokenReq struct { + Delegates []string `json:"delegates,omitempty"` + Lifetime string `json:"lifetime,omitempty"` + Scope []string `json:"scope,omitempty"` +} + +type generateAccessTokenResp struct { + AccessToken string `json:"accessToken"` + ExpireTime string `json:"expireTime"` +} + +type impersonatedTokenSource struct { + ctx context.Context + ts oauth2.TokenSource + + name string + lifetime string + scopes []string + delegates []string +} + +// Token returns an impersonated Token. +func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { + hc := oauth2.NewClient(i.ctx, i.ts) + reqBody := generateAccessTokenReq{ + Delegates: i.delegates, + Lifetime: i.lifetime, + Scope: i.scopes, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to marshal request: %v", err) + } + url := fmt.Sprintf("https://iamcredentials.googleapis.com/v1/%s:generateAccessToken", i.name) + req, err := http.NewRequest("POST", url, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to create request: %v", err) + } + req = req.WithContext(i.ctx) + req.Header.Set("Content-Type", "application/json") + + resp, err := hc.Do(req) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to read body: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var accessTokenResp generateAccessTokenResp + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to parse expiry: %v", err) + } + return &oauth2.Token{ + AccessToken: accessTokenResp.AccessToken, + Expiry: expiry, + }, nil +} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/service-account.json b/prometheus-to-sd/vendor/google.golang.org/api/internal/service-account.json deleted file mode 100644 index 6b36a9296..000000000 --- a/prometheus-to-sd/vendor/google.golang.org/api/internal/service-account.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "type": "service_account", - "project_id": "project_id", - "private_key_id": "private_key_id", - "private_key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCzd9ZdbPLAR4/g\nj+Rodu15kEasMpxf/Mz+gKRb2fmgR2Y18Y/iRBYZ4SkmF2pBSfzvwE/aTCzSPBGl\njHhPzohXnSN029eWoItmxVONlqCbR29pD07aLzv08LGeIGdHIEdhVjhvRwTkYZIF\ndXmlHNDRUU/EbJN9D+3ahw22BNnC4PaDgfIWTs3xIlTCSf2rL39I4DSNLTS/LzxK\n/XrQfBMtfwMWwyQaemXbc7gRgzOy8L56wa1W1zyXx99th97j1bLnoAXBGplhB4Co\n25ohyDAuhxRm+XGMEaO0Mzo7u97kvhj48a569RH1QRhOf7EBf60jO4h5eOmfi5P5\nPV3l7041AgMBAAECggEAEZ0RTNoEeRqM5F067YW+iM/AH+ZXspP9Cn1VpC4gcbqQ\nLXsnw+0qvh97CmIB66Z3TJBzRdl0DK4YjUbcB/kdKHwjnrR01DOtesijCqJd4N+B\n762w73jzSXbV9872U+S3HLZ5k3JE6KUqz55X8fyCAgkY6w4862lEzs2yasrPFHEV\nRoQp3PM0Miif8R3hGDhOWcHxcobullthG6JHAQFfc1ctwEjZI4TK0iWqlzfWGyKN\nT9UgvjUDud5cGvS9el0AiLN6keAf77tcPn1zetUVhxN1KN4bVAm1Q+6O8esl63Rj\n7JXpHzxaRnit9S6/aH/twHsGGtLg5Puw6jey6xs4AQKBgQD2JNy1wzewCRkD+jug\n8CHbJ+LIJVRNIaWa/RK1QD8/UjmFPkIzRQSF3AKC5mRAWSa2FL3yVK3N/DD7hazW\n85XSBB7IDcnoJnA9SkUeWwqQGkDx3EntlU3gX8Kn/+ofF8O9jLXxAa901MAVXVuf\n5YDzrl4PNE3bFnPCdiNmSdRfhQKBgQC6p4DsCpwqbeTu9f5ak9VW/fQP47Fgt+Mf\nwGjBnKP5PbbNJpHCfamF7jqSRH83Xy0KNssH7jD/NZ2oT594sMmiQPUC5ni9VYY6\nsuYB0JbD5Mq+EjKIVhYtxaQJ76LzHreEI+G4z6k3H7/hRpr3/C48n9G/uVkT9DbJ\noplxxEx68QKBgQCdJ23vcwO0Firtmi/GEmtbVHz70rGfSXNFoHz4UlvPXv0wsE5u\nE4vOt2i3EMhDOWh46odYGG6bzH+tp2xyFTW70Dui+QLHgPs6dpfoyLHWzZxXj5F3\n6lK9hgZvYvqk/XRRKmzjwnK2wjsdqOyeC1covlR5mqh20D/6kZkKbur0TQKBgAwy\nCZBimRWEnKKoW/gbFKNccGfhXqONID/g2Hdd/rC4QYth68AjacIgcJ9B7nX1uAGk\n1tsryvPB0w0+NpMyKdp6GAgaeuUUA3MuYSzZLiCagEyu77JMvaI7+Z3UlHcCGMd/\neK4Uk1/QqT7U2Cc/yN2ZK6E1QQa2vCWshA4U31JhAoGAbtbSSSsul1c+PsJ13Cfk\n6qVnqYzPqt23QTyOZmGAvUHH/M4xRiQpOE0cDF4t/r5PwenAQPQzTvMmWRzj6uAY\n3eaU0eAK7ZfoweCoOIAPnpFbbRLrXfoY46H7MYh7euWGXOKEpxz5yzuEkd9ByNUE\n86vSEidqbMIiXVgEgnu/k08=\n-----END PRIVATE KEY-----\n", - "client_email": "xyz@developer.gserviceaccount.com", - "client_id": "123", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://accounts.google.com/o/oauth2/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/xyz%40developer.gserviceaccount.com" -} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/settings.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/settings.go index 3b779c256..76efdb227 100644 --- a/prometheus-to-sd/vendor/google.golang.org/api/internal/settings.go +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/settings.go @@ -12,33 +12,41 @@ import ( "golang.org/x/oauth2" "golang.org/x/oauth2/google" + "google.golang.org/api/internal/impersonate" "google.golang.org/grpc" ) // DialSettings holds information needed to establish a connection with a // Google API service. type DialSettings struct { - Endpoint string - DefaultEndpoint string - DefaultMTLSEndpoint string - Scopes []string - TokenSource oauth2.TokenSource - Credentials *google.Credentials - CredentialsFile string // if set, Token Source is ignored. 
- CredentialsJSON []byte - UserAgent string - APIKey string - Audiences []string - HTTPClient *http.Client - GRPCDialOpts []grpc.DialOption - GRPCConn *grpc.ClientConn - GRPCConnPool ConnPool - GRPCConnPoolSize int - NoAuth bool - TelemetryDisabled bool - ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error) - CustomClaims map[string]interface{} - SkipValidation bool + Endpoint string + DefaultEndpoint string + DefaultMTLSEndpoint string + Scopes []string + DefaultScopes []string + EnableJwtWithScope bool + TokenSource oauth2.TokenSource + Credentials *google.Credentials + CredentialsFile string // if set, Token Source is ignored. + CredentialsJSON []byte + InternalCredentials *google.Credentials + UserAgent string + APIKey string + Audiences []string + DefaultAudience string + HTTPClient *http.Client + GRPCDialOpts []grpc.DialOption + GRPCConn *grpc.ClientConn + GRPCConnPool ConnPool + GRPCConnPoolSize int + NoAuth bool + TelemetryDisabled bool + ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + CustomClaims map[string]interface{} + SkipValidation bool + ImpersonationConfig *impersonate.Config + EnableDirectPath bool + AllowNonDefaultServiceAccount bool // Google API system parameters. For more information please read: // https://cloud.google.com/apis/docs/system-parameters @@ -46,6 +54,28 @@ type DialSettings struct { RequestReason string } +// GetScopes returns the user-provided scopes, if set, or else falls back to the +// default scopes. +func (ds *DialSettings) GetScopes() []string { + if len(ds.Scopes) > 0 { + return ds.Scopes + } + return ds.DefaultScopes +} + +// GetAudience returns the user-provided audience, if set, or else falls back to the default audience. +func (ds *DialSettings) GetAudience() string { + if ds.HasCustomAudience() { + return ds.Audiences[0] + } + return ds.DefaultAudience +} + +// HasCustomAudience returns true if a custom audience is provided by the user. +func (ds *DialSettings) HasCustomAudience() bool { + return len(ds.Audiences) > 0 +} + // Validate reports an error if ds is invalid. func (ds *DialSettings) Validate() error { if ds.SkipValidation { @@ -105,6 +135,8 @@ func (ds *DialSettings) Validate() error { if ds.ClientCertSource != nil && (ds.GRPCConn != nil || ds.GRPCConnPool != nil || ds.GRPCConnPoolSize != 0 || ds.GRPCDialOpts != nil) { return errors.New("WithClientCertSource is currently only supported for HTTP. gRPC settings are incompatible") } - + if ds.ImpersonationConfig != nil && len(ds.ImpersonationConfig.Scopes) == 0 && len(ds.Scopes) == 0 { + return errors.New("WithImpersonatedCredentials requires scopes to be provided") + } return nil } diff --git a/prometheus-to-sd/vendor/google.golang.org/api/internal/version.go b/prometheus-to-sd/vendor/google.golang.org/api/internal/version.go new file mode 100644 index 000000000..7a4f6d898 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/api/internal/version.go @@ -0,0 +1,8 @@ +// Copyright 2022 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// Version is the current tagged release of the library.
+const Version = "0.114.0" diff --git a/prometheus-to-sd/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json b/prometheus-to-sd/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json index 70c868616..6490d4738 100644 --- a/prometheus-to-sd/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json +++ b/prometheus-to-sd/vendor/google.golang.org/api/monitoring/v3/monitoring-api.json @@ -3,7 +3,7 @@ "oauth2": { "scopes": { "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" + "description": "See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account." }, "https://www.googleapis.com/auth/monitoring": { "description": "View and write monitoring data for all of your Google and third-party Cloud and API projects" @@ -21,7 +21,7 @@ "baseUrl": "https://monitoring.googleapis.com/", "batchPath": "batch", "canonicalName": "Monitoring", - "description": "Manages your Cloud Monitoring data and configurations. Most projects must be associated with a Workspace, with a few exceptions as noted on the individual method pages. The table entries below are presented in alphabetical order, not in order of common use. For explanations of the concepts found in the table entries, read the Cloud Monitoring documentation.", + "description": "Manages your Cloud Monitoring data and configurations.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/monitoring/api/", "fullyEncodeReservedExpansion": true, @@ -114,12 +114,558 @@ }, "protocol": "rest", "resources": { + "folders": { + "resources": { + "timeSeries": { + "methods": { + "list": { + "description": "Lists time series that match a filter.", + "flatPath": "v3/folders/{foldersId}/timeSeries", + "httpMethod": "GET", + "id": "monitoring.folders.timeSeries.list", + "parameterOrder": [ + "name" + ], + "parameters": { + "aggregation.alignmentPeriod": { + "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for alerting policies.", + "format": "google-duration", + "location": "query", + "type": "string" + }, + "aggregation.crossSeriesReducer": { + "description": "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. 
An alignment_period must also be specified; otherwise, an error is returned.", + "enum": [ + "REDUCE_NONE", + "REDUCE_MEAN", + "REDUCE_MIN", + "REDUCE_MAX", + "REDUCE_SUM", + "REDUCE_STDDEV", + "REDUCE_COUNT", + "REDUCE_COUNT_TRUE", + "REDUCE_COUNT_FALSE", + "REDUCE_FRACTION_TRUE", + "REDUCE_PERCENTILE_99", + "REDUCE_PERCENTILE_95", + "REDUCE_PERCENTILE_50", + "REDUCE_PERCENTILE_05" + ], + "enumDescriptions": [ + "No cross-time series reduction. The output of the Aligner is returned.", + "Reduce by computing the mean value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + "Reduce by computing the minimum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the maximum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the sum across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric and distribution values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the standard deviation across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + "Reduce by computing the number of data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of numeric, Boolean, distribution, and string value_type. The value_type of the output is INT64.", + "Reduce by computing the number of True-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + "Reduce by computing the number of False-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + "Reduce by computing the ratio of the number of True-valued data points to the total number of data points for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + "Reduce by computing the 99th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 95th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 50th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 5th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. 
This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE." + ], + "location": "query", + "type": "string" + }, + "aggregation.groupByFields": { + "description": "The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.", + "location": "query", + "repeated": true, + "type": "string" + }, + "aggregation.perSeriesAligner": { + "description": "An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.", + "enum": [ + "ALIGN_NONE", + "ALIGN_DELTA", + "ALIGN_RATE", + "ALIGN_INTERPOLATE", + "ALIGN_NEXT_OLDER", + "ALIGN_MIN", + "ALIGN_MAX", + "ALIGN_MEAN", + "ALIGN_COUNT", + "ALIGN_SUM", + "ALIGN_STDDEV", + "ALIGN_COUNT_TRUE", + "ALIGN_COUNT_FALSE", + "ALIGN_FRACTION_TRUE", + "ALIGN_PERCENTILE_99", + "ALIGN_PERCENTILE_95", + "ALIGN_PERCENTILE_50", + "ALIGN_PERCENTILE_05", + "ALIGN_PERCENT_CHANGE" + ], + "enumDescriptions": [ + "No alignment. Raw data is returned. Not valid if cross-series reduction is requested. The value_type of the result is the same as the value_type of the input.", + "Align and convert to DELTA. The output is delta = y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The value_type of the aligned result is the same as the value_type of the input.", + "Align and convert to a rate. The result is computed as rate = (y1 - y0)/(t1 - t0), or \"delta over time\". Think of this aligner as providing the slope of the line that passes through the value at the start and at the end of the alignment_period.This aligner is valid for CUMULATIVE and DELTA metrics with numeric values. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. 
The output is a GAUGE metric with value_type DOUBLE.If, by \"rate\", you mean \"percentage change\", see the ALIGN_PERCENT_CHANGE aligner instead.", + "Align by interpolating between adjacent points around the alignment period boundary. This aligner is valid for GAUGE metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align by moving the most recent data point before the end of the alignment period to the boundary at the end of the alignment period. This aligner is valid for GAUGE metrics. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the minimum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the maximum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the mean value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is DOUBLE.", + "Align the time series by returning the number of values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric or Boolean values. The value_type of the aligned result is INT64.", + "Align the time series by returning the sum of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric and distribution values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the standard deviation of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the output is DOUBLE.", + "Align the time series by returning the number of True values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + "Align the time series by returning the number of False values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + "Align the time series by returning the ratio of the number of True values to the total number of values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 99th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 95th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 50th percentile of all data points in the period. 
This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 5th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align and convert to a percentage change. This aligner is valid for GAUGE and DELTA metrics with numeric values. This alignment returns ((current - previous)/previous) * 100, where the value of previous is determined based on the alignment_period.If the values of current and previous are both 0, then the returned value is 0. If only previous is 0, the returned value is infinity.A 10-minute moving mean is computed at each point of the alignment period prior to the above calculation to smooth the metric and prevent false positives from very short-lived spikes. The moving mean is only applicable for data whose values are \u003e= 0. Any values \u003c 0 are treated as a missing datapoint, and are ignored. While DELTA metrics are accepted by this alignment, special care should be taken that the values for the metric will always be positive. The output is a GAUGE metric with value_type DOUBLE." + ], + "location": "query", + "type": "string" + }, + "filter": { + "description": "Required. A monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that specifies which time series should be returned. The filter must specify a single metric type, and can additionally specify metric labels and other information. For example: metric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND metric.labels.instance_name = \"my-instance-name\" ", + "location": "query", + "type": "string" + }, + "interval.endTime": { + "description": "Required. The end of the time interval.", + "format": "google-datetime", + "location": "query", + "type": "string" + }, + "interval.startTime": { + "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", + "format": "google-datetime", + "location": "query", + "type": "string" + }, + "name": { + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name), organization or folder on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] organizations/[ORGANIZATION_ID] folders/[FOLDER_ID] ", + "location": "path", + "pattern": "^folders/[^/]+$", + "required": true, + "type": "string" + }, + "orderBy": { + "description": "Unsupported: must be left blank. The points in each time series are currently returned in reverse time order (most recent to oldest).", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "A positive number that is the maximum number of results to return. If page_size is empty or more than 100,000 results, the effective page_size is 100,000 results. If view is set to FULL, this is the maximum number of Points returned. If view is set to HEADERS, this is the maximum number of TimeSeries returned.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. 
Using this field causes the method to return additional results from the previous method call.", + "location": "query", + "type": "string" + }, + "secondaryAggregation.alignmentPeriod": { + "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for alerting policies.", + "format": "google-duration", + "location": "query", + "type": "string" + }, + "secondaryAggregation.crossSeriesReducer": { + "description": "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.", + "enum": [ + "REDUCE_NONE", + "REDUCE_MEAN", + "REDUCE_MIN", + "REDUCE_MAX", + "REDUCE_SUM", + "REDUCE_STDDEV", + "REDUCE_COUNT", + "REDUCE_COUNT_TRUE", + "REDUCE_COUNT_FALSE", + "REDUCE_FRACTION_TRUE", + "REDUCE_PERCENTILE_99", + "REDUCE_PERCENTILE_95", + "REDUCE_PERCENTILE_50", + "REDUCE_PERCENTILE_05" + ], + "enumDescriptions": [ + "No cross-time series reduction. The output of the Aligner is returned.", + "Reduce by computing the mean value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + "Reduce by computing the minimum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the maximum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the sum across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric and distribution values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the standard deviation across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + "Reduce by computing the number of data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of numeric, Boolean, distribution, and string value_type. 
The value_type of the output is INT64.", + "Reduce by computing the number of True-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + "Reduce by computing the number of False-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + "Reduce by computing the ratio of the number of True-valued data points to the total number of data points for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + "Reduce by computing the 99th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 95th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 50th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 5th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE." + ], + "location": "query", + "type": "string" + }, + "secondaryAggregation.groupByFields": { + "description": "The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.", + "location": "query", + "repeated": true, + "type": "string" + }, + "secondaryAggregation.perSeriesAligner": { + "description": "An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. 
If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.", + "enum": [ + "ALIGN_NONE", + "ALIGN_DELTA", + "ALIGN_RATE", + "ALIGN_INTERPOLATE", + "ALIGN_NEXT_OLDER", + "ALIGN_MIN", + "ALIGN_MAX", + "ALIGN_MEAN", + "ALIGN_COUNT", + "ALIGN_SUM", + "ALIGN_STDDEV", + "ALIGN_COUNT_TRUE", + "ALIGN_COUNT_FALSE", + "ALIGN_FRACTION_TRUE", + "ALIGN_PERCENTILE_99", + "ALIGN_PERCENTILE_95", + "ALIGN_PERCENTILE_50", + "ALIGN_PERCENTILE_05", + "ALIGN_PERCENT_CHANGE" + ], + "enumDescriptions": [ + "No alignment. Raw data is returned. Not valid if cross-series reduction is requested. The value_type of the result is the same as the value_type of the input.", + "Align and convert to DELTA. The output is delta = y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The value_type of the aligned result is the same as the value_type of the input.", + "Align and convert to a rate. The result is computed as rate = (y1 - y0)/(t1 - t0), or \"delta over time\". Think of this aligner as providing the slope of the line that passes through the value at the start and at the end of the alignment_period.This aligner is valid for CUMULATIVE and DELTA metrics with numeric values. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The output is a GAUGE metric with value_type DOUBLE.If, by \"rate\", you mean \"percentage change\", see the ALIGN_PERCENT_CHANGE aligner instead.", + "Align by interpolating between adjacent points around the alignment period boundary. This aligner is valid for GAUGE metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align by moving the most recent data point before the end of the alignment period to the boundary at the end of the alignment period. This aligner is valid for GAUGE metrics. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the minimum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the maximum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the mean value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is DOUBLE.", + "Align the time series by returning the number of values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric or Boolean values. The value_type of the aligned result is INT64.", + "Align the time series by returning the sum of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric and distribution values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the standard deviation of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. 
The value_type of the output is DOUBLE.", + "Align the time series by returning the number of True values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + "Align the time series by returning the number of False values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + "Align the time series by returning the ratio of the number of True values to the total number of values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 99th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 95th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 50th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 5th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align and convert to a percentage change. This aligner is valid for GAUGE and DELTA metrics with numeric values. This alignment returns ((current - previous)/previous) * 100, where the value of previous is determined based on the alignment_period.If the values of current and previous are both 0, then the returned value is 0. If only previous is 0, the returned value is infinity.A 10-minute moving mean is computed at each point of the alignment period prior to the above calculation to smooth the metric and prevent false positives from very short-lived spikes. The moving mean is only applicable for data whose values are \u003e= 0. Any values \u003c 0 are treated as a missing datapoint, and are ignored. While DELTA metrics are accepted by this alignment, special care should be taken that the values for the metric will always be positive. The output is a GAUGE metric with value_type DOUBLE." + ], + "location": "query", + "type": "string" + }, + "view": { + "description": "Required. Specifies which information is returned about the time series.", + "enum": [ + "FULL", + "HEADERS" + ], + "enumDescriptions": [ + "Returns the identity of the metric(s), the time series, and the time series data.", + "Returns the identity of the metric and the time series resource, but not the time series data." 
+ ], + "location": "query", + "type": "string" + } + }, + "path": "v3/{+name}/timeSeries", + "response": { + "$ref": "ListTimeSeriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read" + ] + } + } + } + } + }, + "organizations": { + "resources": { + "timeSeries": { + "methods": { + "list": { + "description": "Lists time series that match a filter.", + "flatPath": "v3/organizations/{organizationsId}/timeSeries", + "httpMethod": "GET", + "id": "monitoring.organizations.timeSeries.list", + "parameterOrder": [ + "name" + ], + "parameters": { + "aggregation.alignmentPeriod": { + "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for alerting policies.", + "format": "google-duration", + "location": "query", + "type": "string" + }, + "aggregation.crossSeriesReducer": { + "description": "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.", + "enum": [ + "REDUCE_NONE", + "REDUCE_MEAN", + "REDUCE_MIN", + "REDUCE_MAX", + "REDUCE_SUM", + "REDUCE_STDDEV", + "REDUCE_COUNT", + "REDUCE_COUNT_TRUE", + "REDUCE_COUNT_FALSE", + "REDUCE_FRACTION_TRUE", + "REDUCE_PERCENTILE_99", + "REDUCE_PERCENTILE_95", + "REDUCE_PERCENTILE_50", + "REDUCE_PERCENTILE_05" + ], + "enumDescriptions": [ + "No cross-time series reduction. The output of the Aligner is returned.", + "Reduce by computing the mean value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + "Reduce by computing the minimum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the maximum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the sum across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric and distribution values. 
The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the standard deviation across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + "Reduce by computing the number of data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of numeric, Boolean, distribution, and string value_type. The value_type of the output is INT64.", + "Reduce by computing the number of True-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + "Reduce by computing the number of False-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + "Reduce by computing the ratio of the number of True-valued data points to the total number of data points for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + "Reduce by computing the 99th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 95th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 50th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 5th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE." + ], + "location": "query", + "type": "string" + }, + "aggregation.groupByFields": { + "description": "The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.", + "location": "query", + "repeated": true, + "type": "string" + }, + "aggregation.perSeriesAligner": { + "description": "An Aligner describes how to bring the data points in a single time series into temporal alignment. 
Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.", + "enum": [ + "ALIGN_NONE", + "ALIGN_DELTA", + "ALIGN_RATE", + "ALIGN_INTERPOLATE", + "ALIGN_NEXT_OLDER", + "ALIGN_MIN", + "ALIGN_MAX", + "ALIGN_MEAN", + "ALIGN_COUNT", + "ALIGN_SUM", + "ALIGN_STDDEV", + "ALIGN_COUNT_TRUE", + "ALIGN_COUNT_FALSE", + "ALIGN_FRACTION_TRUE", + "ALIGN_PERCENTILE_99", + "ALIGN_PERCENTILE_95", + "ALIGN_PERCENTILE_50", + "ALIGN_PERCENTILE_05", + "ALIGN_PERCENT_CHANGE" + ], + "enumDescriptions": [ + "No alignment. Raw data is returned. Not valid if cross-series reduction is requested. The value_type of the result is the same as the value_type of the input.", + "Align and convert to DELTA. The output is delta = y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The value_type of the aligned result is the same as the value_type of the input.", + "Align and convert to a rate. The result is computed as rate = (y1 - y0)/(t1 - t0), or \"delta over time\". Think of this aligner as providing the slope of the line that passes through the value at the start and at the end of the alignment_period.This aligner is valid for CUMULATIVE and DELTA metrics with numeric values. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The output is a GAUGE metric with value_type DOUBLE.If, by \"rate\", you mean \"percentage change\", see the ALIGN_PERCENT_CHANGE aligner instead.", + "Align by interpolating between adjacent points around the alignment period boundary. This aligner is valid for GAUGE metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align by moving the most recent data point before the end of the alignment period to the boundary at the end of the alignment period. This aligner is valid for GAUGE metrics. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the minimum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the maximum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the mean value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is DOUBLE.", + "Align the time series by returning the number of values in each alignment period. 
This aligner is valid for GAUGE and DELTA metrics with numeric or Boolean values. The value_type of the aligned result is INT64.", + "Align the time series by returning the sum of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric and distribution values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the standard deviation of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the output is DOUBLE.", + "Align the time series by returning the number of True values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + "Align the time series by returning the number of False values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + "Align the time series by returning the ratio of the number of True values to the total number of values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 99th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 95th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 50th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 5th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align and convert to a percentage change. This aligner is valid for GAUGE and DELTA metrics with numeric values. This alignment returns ((current - previous)/previous) * 100, where the value of previous is determined based on the alignment_period.If the values of current and previous are both 0, then the returned value is 0. If only previous is 0, the returned value is infinity.A 10-minute moving mean is computed at each point of the alignment period prior to the above calculation to smooth the metric and prevent false positives from very short-lived spikes. The moving mean is only applicable for data whose values are \u003e= 0. Any values \u003c 0 are treated as a missing datapoint, and are ignored. While DELTA metrics are accepted by this alignment, special care should be taken that the values for the metric will always be positive. The output is a GAUGE metric with value_type DOUBLE." 
+ ], + "location": "query", + "type": "string" + }, + "filter": { + "description": "Required. A monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that specifies which time series should be returned. The filter must specify a single metric type, and can additionally specify metric labels and other information. For example: metric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND metric.labels.instance_name = \"my-instance-name\" ", + "location": "query", + "type": "string" + }, + "interval.endTime": { + "description": "Required. The end of the time interval.", + "format": "google-datetime", + "location": "query", + "type": "string" + }, + "interval.startTime": { + "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", + "format": "google-datetime", + "location": "query", + "type": "string" + }, + "name": { + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name), organization or folder on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] organizations/[ORGANIZATION_ID] folders/[FOLDER_ID] ", + "location": "path", + "pattern": "^organizations/[^/]+$", + "required": true, + "type": "string" + }, + "orderBy": { + "description": "Unsupported: must be left blank. The points in each time series are currently returned in reverse time order (most recent to oldest).", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "A positive number that is the maximum number of results to return. If page_size is empty or more than 100,000 results, the effective page_size is 100,000 results. If view is set to FULL, this is the maximum number of Points returned. If view is set to HEADERS, this is the maximum number of TimeSeries returned.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", + "location": "query", + "type": "string" + }, + "secondaryAggregation.alignmentPeriod": { + "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for alerting policies.", + "format": "google-duration", + "location": "query", + "type": "string" + }, + "secondaryAggregation.crossSeriesReducer": { + "description": "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. 
Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.", + "enum": [ + "REDUCE_NONE", + "REDUCE_MEAN", + "REDUCE_MIN", + "REDUCE_MAX", + "REDUCE_SUM", + "REDUCE_STDDEV", + "REDUCE_COUNT", + "REDUCE_COUNT_TRUE", + "REDUCE_COUNT_FALSE", + "REDUCE_FRACTION_TRUE", + "REDUCE_PERCENTILE_99", + "REDUCE_PERCENTILE_95", + "REDUCE_PERCENTILE_50", + "REDUCE_PERCENTILE_05" + ], + "enumDescriptions": [ + "No cross-time series reduction. The output of the Aligner is returned.", + "Reduce by computing the mean value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + "Reduce by computing the minimum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the maximum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the sum across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric and distribution values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the standard deviation across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + "Reduce by computing the number of data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of numeric, Boolean, distribution, and string value_type. The value_type of the output is INT64.", + "Reduce by computing the number of True-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + "Reduce by computing the number of False-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + "Reduce by computing the ratio of the number of True-valued data points to the total number of data points for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + "Reduce by computing the 99th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 95th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. 
The value of the output is DOUBLE.", + "Reduce by computing the 50th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 5th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE." + ], + "location": "query", + "type": "string" + }, + "secondaryAggregation.groupByFields": { + "description": "The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.", + "location": "query", + "repeated": true, + "type": "string" + }, + "secondaryAggregation.perSeriesAligner": { + "description": "An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.", + "enum": [ + "ALIGN_NONE", + "ALIGN_DELTA", + "ALIGN_RATE", + "ALIGN_INTERPOLATE", + "ALIGN_NEXT_OLDER", + "ALIGN_MIN", + "ALIGN_MAX", + "ALIGN_MEAN", + "ALIGN_COUNT", + "ALIGN_SUM", + "ALIGN_STDDEV", + "ALIGN_COUNT_TRUE", + "ALIGN_COUNT_FALSE", + "ALIGN_FRACTION_TRUE", + "ALIGN_PERCENTILE_99", + "ALIGN_PERCENTILE_95", + "ALIGN_PERCENTILE_50", + "ALIGN_PERCENTILE_05", + "ALIGN_PERCENT_CHANGE" + ], + "enumDescriptions": [ + "No alignment. Raw data is returned. Not valid if cross-series reduction is requested. The value_type of the result is the same as the value_type of the input.", + "Align and convert to DELTA. The output is delta = y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The value_type of the aligned result is the same as the value_type of the input.", + "Align and convert to a rate. The result is computed as rate = (y1 - y0)/(t1 - t0), or \"delta over time\". 
Think of this aligner as providing the slope of the line that passes through the value at the start and at the end of the alignment_period.This aligner is valid for CUMULATIVE and DELTA metrics with numeric values. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The output is a GAUGE metric with value_type DOUBLE.If, by \"rate\", you mean \"percentage change\", see the ALIGN_PERCENT_CHANGE aligner instead.", + "Align by interpolating between adjacent points around the alignment period boundary. This aligner is valid for GAUGE metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align by moving the most recent data point before the end of the alignment period to the boundary at the end of the alignment period. This aligner is valid for GAUGE metrics. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the minimum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the maximum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the mean value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is DOUBLE.", + "Align the time series by returning the number of values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric or Boolean values. The value_type of the aligned result is INT64.", + "Align the time series by returning the sum of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric and distribution values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the standard deviation of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the output is DOUBLE.", + "Align the time series by returning the number of True values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + "Align the time series by returning the number of False values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + "Align the time series by returning the ratio of the number of True values to the total number of values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 99th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 95th percentile of all data points in the period. 
This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 50th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 5th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align and convert to a percentage change. This aligner is valid for GAUGE and DELTA metrics with numeric values. This alignment returns ((current - previous)/previous) * 100, where the value of previous is determined based on the alignment_period.If the values of current and previous are both 0, then the returned value is 0. If only previous is 0, the returned value is infinity.A 10-minute moving mean is computed at each point of the alignment period prior to the above calculation to smooth the metric and prevent false positives from very short-lived spikes. The moving mean is only applicable for data whose values are \u003e= 0. Any values \u003c 0 are treated as a missing datapoint, and are ignored. While DELTA metrics are accepted by this alignment, special care should be taken that the values for the metric will always be positive. The output is a GAUGE metric with value_type DOUBLE." + ], + "location": "query", + "type": "string" + }, + "view": { + "description": "Required. Specifies which information is returned about the time series.", + "enum": [ + "FULL", + "HEADERS" + ], + "enumDescriptions": [ + "Returns the identity of the metric(s), the time series, and the time series data.", + "Returns the identity of the metric and the time series resource, but not the time series data." + ], + "location": "query", + "type": "string" + } + }, + "path": "v3/{+name}/timeSeries", + "response": { + "$ref": "ListTimeSeriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read" + ] + } + } + } + } + }, "projects": { "resources": { "alertPolicies": { "methods": { "create": { - "description": "Creates a new alerting policy.", + "description": "Creates a new alerting policy.Design your application to single-thread API calls that modify the state of alerting policies in a single project. This includes calls to CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.", "flatPath": "v3/projects/{projectsId}/alertPolicies", "httpMethod": "POST", "id": "monitoring.projects.alertPolicies.create", @@ -128,7 +674,7 @@ ], "parameters": { "name": { - "description": "Required. The project in which to create the alerting policy. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\nNote that this field names the parent container in which the alerting policy will be written, not the name of the created policy. |name| must be a host project of a workspace, otherwise INVALID_ARGUMENT error will return. 
The alerting policy that is returned will have a name that contains a normalized representation of this name as a prefix but adds a suffix of the form /alertPolicies/[ALERT_POLICY_ID], identifying the policy in the container.", + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) in which to create the alerting policy. The format is: projects/[PROJECT_ID_OR_NUMBER] Note that this field names the parent container in which the alerting policy will be written, not the name of the created policy. |name| must be a host project of a Metrics Scope, otherwise INVALID_ARGUMENT error will return. The alerting policy that is returned will have a name that contains a normalized representation of this name as a prefix but adds a suffix of the form /alertPolicies/[ALERT_POLICY_ID], identifying the policy in the container.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -148,7 +694,7 @@ ] }, "delete": { - "description": "Deletes an alerting policy.", + "description": "Deletes an alerting policy.Design your application to single-thread API calls that modify the state of alerting policies in a single project. This includes calls to CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.", "flatPath": "v3/projects/{projectsId}/alertPolicies/{alertPoliciesId}", "httpMethod": "DELETE", "id": "monitoring.projects.alertPolicies.delete", @@ -157,7 +703,7 @@ ], "parameters": { "name": { - "description": "Required. The alerting policy to delete. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]\nFor more information, see AlertPolicy.", + "description": "Required. The alerting policy to delete. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] For more information, see AlertPolicy.", "location": "path", "pattern": "^projects/[^/]+/alertPolicies/[^/]+$", "required": true, @@ -183,7 +729,7 @@ ], "parameters": { "name": { - "description": "Required. The alerting policy to retrieve. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]\n", + "description": "Required. The alerting policy to retrieve. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] ", "location": "path", "pattern": "^projects/[^/]+/alertPolicies/[^/]+$", "required": true, @@ -215,7 +761,7 @@ "type": "string" }, "name": { - "description": "Required. The project whose alert policies are to be listed. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\nNote that this field names the parent container in which the alerting policies to be listed are stored. To retrieve a single alerting policy by name, use the GetAlertPolicy operation, instead.", + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) whose alert policies are to be listed. The format is: projects/[PROJECT_ID_OR_NUMBER] Note that this field names the parent container in which the alerting policies to be listed are stored. To retrieve a single alerting policy by name, use the GetAlertPolicy operation, instead.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -249,7 +795,7 @@ ] }, "patch": { - "description": "Updates an alerting policy. You can either replace the entire policy with a new one or replace only certain fields in the current alerting policy by specifying the fields to be updated via updateMask. Returns the updated alerting policy.", + "description": "Updates an alerting policy. 
You can either replace the entire policy with a new one or replace only certain fields in the current alerting policy by specifying the fields to be updated via updateMask. Returns the updated alerting policy.Design your application to single-thread API calls that modify the state of alerting policies in a single project. This includes calls to CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.", "flatPath": "v3/projects/{projectsId}/alertPolicies/{alertPoliciesId}", "httpMethod": "PATCH", "id": "monitoring.projects.alertPolicies.patch", @@ -258,14 +804,14 @@ ], "parameters": { "name": { - "description": "Required if the policy exists. The resource name for this policy. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]\n[ALERT_POLICY_ID] is assigned by Stackdriver Monitoring when the policy is created. When calling the alertPolicies.create method, do not include the name field in the alerting policy passed as part of the request.", + "description": "Required if the policy exists. The resource name for this policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] [ALERT_POLICY_ID] is assigned by Cloud Monitoring when the policy is created. When calling the alertPolicies.create method, do not include the name field in the alerting policy passed as part of the request.", "location": "path", "pattern": "^projects/[^/]+/alertPolicies/[^/]+$", "required": true, "type": "string" }, "updateMask": { - "description": "Optional. A list of alerting policy field names. If this field is not empty, each listed field in the existing alerting policy is set to the value of the corresponding field in the supplied policy (alert_policy), or to the field's default value if the field is not in the supplied alerting policy. Fields not listed retain their previous value.Examples of valid field masks include display_name, documentation, documentation.content, documentation.mime_type, user_labels, user_label.nameofkey, enabled, conditions, combiner, etc.If this field is empty, then the supplied alerting policy replaces the existing policy. It is the same as deleting the existing policy and adding the supplied policy, except for the following:\nThe new policy will have the same [ALERT_POLICY_ID] as the former policy. This gives you continuity with the former policy in your notifications and incidents.\nConditions in the new policy will keep their former [CONDITION_ID] if the supplied condition includes the name field with that [CONDITION_ID]. If the supplied condition omits the name field, then a new [CONDITION_ID] is created.", + "description": "Optional. A list of alerting policy field names. If this field is not empty, each listed field in the existing alerting policy is set to the value of the corresponding field in the supplied policy (alert_policy), or to the field's default value if the field is not in the supplied alerting policy. Fields not listed retain their previous value.Examples of valid field masks include display_name, documentation, documentation.content, documentation.mime_type, user_labels, user_label.nameofkey, enabled, conditions, combiner, etc.If this field is empty, then the supplied alerting policy replaces the existing policy. It is the same as deleting the existing policy and adding the supplied policy, except for the following: The new policy will have the same [ALERT_POLICY_ID] as the former policy. This gives you continuity with the former policy in your notifications and incidents. 
Conditions in the new policy will keep their former [CONDITION_ID] if the supplied condition includes the name field with that [CONDITION_ID]. If the supplied condition omits the name field, then a new [CONDITION_ID] is created.", "format": "google-fieldmask", "location": "query", "type": "string" @@ -288,7 +834,7 @@ "collectdTimeSeries": { "methods": { "create": { - "description": "Stackdriver Monitoring Agent only: Creates a new time series.\u003caside class=\"caution\"\u003eThis method is only for use by the Stackdriver Monitoring Agent. Use projects.timeSeries.create instead.\u003c/aside\u003e", + "description": "Cloud Monitoring Agent only: Creates a new time series.This method is only for use by the Cloud Monitoring Agent. Use projects.timeSeries.create instead.", "flatPath": "v3/projects/{projectsId}/collectdTimeSeries", "httpMethod": "POST", "id": "monitoring.projects.collectdTimeSeries.create", @@ -297,7 +843,7 @@ ], "parameters": { "name": { - "description": "The project in which to create the time series. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + "description": "The project (https://cloud.google.com/monitoring/api/v3#project_name) in which to create the time series. The format is: projects/[PROJECT_ID_OR_NUMBER] ", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -331,7 +877,7 @@ ], "parameters": { "name": { - "description": "Required. The project in which to create the group. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) in which to create the group. The format is: projects/[PROJECT_ID_OR_NUMBER] ", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -365,7 +911,7 @@ ], "parameters": { "name": { - "description": "Required. The group to delete. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\n", + "description": "Required. The group to delete. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] ", "location": "path", "pattern": "^projects/[^/]+/groups/[^/]+$", "required": true, @@ -396,7 +942,7 @@ ], "parameters": { "name": { - "description": "Required. The group to retrieve. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\n", + "description": "Required. The group to retrieve. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] ", "location": "path", "pattern": "^projects/[^/]+/groups/[^/]+$", "required": true, @@ -423,22 +969,22 @@ ], "parameters": { "ancestorsOfGroup": { - "description": "A group name. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\nReturns groups that are ancestors of the specified group. The groups are returned in order, starting with the immediate parent and ending with the most distant ancestor. If the specified group has no immediate parent, the results are empty.", + "description": "A group name. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Returns groups that are ancestors of the specified group. The groups are returned in order, starting with the immediate parent and ending with the most distant ancestor. If the specified group has no immediate parent, the results are empty.", "location": "query", "type": "string" }, "childrenOfGroup": { - "description": "A group name. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\nReturns groups whose parent_name field contains the group name. If no groups have this parent, the results are empty.", + "description": "A group name. 
The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Returns groups whose parent_name field contains the group name. If no groups have this parent, the results are empty.", "location": "query", "type": "string" }, "descendantsOfGroup": { - "description": "A group name. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\nReturns the descendants of the specified group. This is a superset of the results returned by the children_of_group filter, and includes children-of-children, and so forth.", + "description": "A group name. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Returns the descendants of the specified group. This is a superset of the results returned by the children_of_group filter, and includes children-of-children, and so forth.", "location": "query", "type": "string" }, "name": { - "description": "Required. The project whose groups are to be listed. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) whose groups are to be listed. The format is: projects/[PROJECT_ID_OR_NUMBER] ", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -476,7 +1022,7 @@ ], "parameters": { "name": { - "description": "Output only. The name of this group. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\nWhen creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique [GROUP_ID] that is generated automatically.", + "description": "Output only. The name of this group. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique [GROUP_ID] that is generated automatically.", "location": "path", "pattern": "^projects/[^/]+/groups/[^/]+$", "required": true, @@ -514,7 +1060,7 @@ ], "parameters": { "filter": { - "description": "An optional list filter (https://cloud.google.com/monitoring/api/learn_more#filtering) describing the members to be returned. The filter may reference the type, labels, and metadata of monitored resources that comprise the group. For example, to return only resources representing Compute Engine VM instances, use this filter:\n`resource.type = \"gce_instance\"`\n", + "description": "An optional list filter (https://cloud.google.com/monitoring/api/learn_more#filtering) describing the members to be returned. The filter may reference the type, labels, and metadata of monitored resources that comprise the group. For example, to return only resources representing Compute Engine VM instances, use this filter: `resource.type = \"gce_instance\"` ", "location": "query", "type": "string" }, @@ -531,7 +1077,7 @@ "type": "string" }, "name": { - "description": "Required. The group whose members are listed. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\n", + "description": "Required. The group whose members are listed. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] ", "location": "path", "pattern": "^projects/[^/]+/groups/[^/]+$", "required": true, @@ -566,7 +1112,7 @@ "metricDescriptors": { "methods": { "create": { - "description": "Creates a new metric descriptor. User-created metric descriptors define custom metrics (https://cloud.google.com/monitoring/custom-metrics).", + "description": "Creates a new metric descriptor. 
The creation is executed asynchronously. User-created metric descriptors define custom metrics (https://cloud.google.com/monitoring/custom-metrics). The metric descriptor is updated if it already exists, except that metric labels are never removed.", "flatPath": "v3/projects/{projectsId}/metricDescriptors", "httpMethod": "POST", "id": "monitoring.projects.metricDescriptors.create", @@ -575,7 +1121,7 @@ ], "parameters": { "name": { - "description": "Required. The project on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER]", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -605,7 +1151,7 @@ ], "parameters": { "name": { - "description": "Required. The metric descriptor on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID]\nAn example of [METRIC_ID] is: \"custom.googleapis.com/my_test_metric\".", + "description": "Required. The metric descriptor on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] An example of [METRIC_ID] is: \"custom.googleapis.com/my_test_metric\".", "location": "path", "pattern": "^projects/[^/]+/metricDescriptors/.*$", "required": true, @@ -622,7 +1168,7 @@ ] }, "get": { - "description": "Gets a single metric descriptor. This method does not require a Workspace.", + "description": "Gets a single metric descriptor.", "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}", "httpMethod": "GET", "id": "monitoring.projects.metricDescriptors.get", @@ -631,7 +1177,7 @@ ], "parameters": { "name": { - "description": "Required. The metric descriptor on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID]\nAn example value of [METRIC_ID] is \"compute.googleapis.com/instance/disk/read_bytes_count\".", + "description": "Required. The metric descriptor on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] An example value of [METRIC_ID] is \"compute.googleapis.com/instance/disk/read_bytes_count\".", "location": "path", "pattern": "^projects/[^/]+/metricDescriptors/.*$", "required": true, @@ -650,7 +1196,7 @@ ] }, "list": { - "description": "Lists metric descriptors that match a filter. This method does not require a Workspace.", + "description": "Lists metric descriptors that match a filter.", "flatPath": "v3/projects/{projectsId}/metricDescriptors", "httpMethod": "GET", "id": "monitoring.projects.metricDescriptors.list", @@ -659,12 +1205,12 @@ ], "parameters": { "filter": { - "description": "If this field is empty, all custom and system-defined metric descriptors are returned. Otherwise, the filter (https://cloud.google.com/monitoring/api/v3/filters) specifies which metric descriptors are to be returned. 
For example, the following filter matches all custom metrics (https://cloud.google.com/monitoring/custom-metrics): metric.type = starts_with(\"custom.googleapis.com/\") ", "location": "query", "type": "string" }, "name": { - "description": "Required. The project on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] ", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -698,7 +1244,7 @@ "monitoredResourceDescriptors": { "methods": { "get": { - "description": "Gets a single monitored resource descriptor. This method does not require a Workspace.", + "description": "Gets a single monitored resource descriptor.", "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors/{monitoredResourceDescriptorsId}", "httpMethod": "GET", "id": "monitoring.projects.monitoredResourceDescriptors.get", @@ -707,7 +1253,7 @@ ], "parameters": { "name": { - "description": "Required. The monitored resource descriptor to get. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE]\nThe [RESOURCE_TYPE] is a predefined type, such as cloudsql_database.", + "description": "Required. The monitored resource descriptor to get. The format is: projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE] The [RESOURCE_TYPE] is a predefined type, such as cloudsql_database.", "location": "path", "pattern": "^projects/[^/]+/monitoredResourceDescriptors/.*$", "required": true, @@ -726,7 +1272,7 @@ ] }, "list": { - "description": "Lists monitored resource descriptors that match a filter. This method does not require a Workspace.", + "description": "Lists monitored resource descriptors that match a filter.", "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors", "httpMethod": "GET", "id": "monitoring.projects.monitoredResourceDescriptors.list", @@ -735,12 +1281,12 @@ ], "parameters": { "filter": { - "description": "An optional filter (https://cloud.google.com/monitoring/api/v3/filters) describing the descriptors to be returned. The filter can reference the descriptor's type and labels. For example, the following filter returns only Google Compute Engine descriptors that have an id label:\nresource.type = starts_with(\"gce_\") AND resource.label:id\n", + "description": "An optional filter (https://cloud.google.com/monitoring/api/v3/filters) describing the descriptors to be returned. The filter can reference the descriptor's type and labels. For example, the following filter returns only Google Compute Engine descriptors that have an id label: resource.type = starts_with(\"gce_\") AND resource.label:id ", "location": "query", "type": "string" }, "name": { - "description": "Required. The project on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] ", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -783,7 +1329,7 @@ ], "parameters": { "name": { - "description": "Required. The channel type for which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE]\n", + "description": "Required. The channel type for which to execute the request. 
The format is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE] ", "location": "path", "pattern": "^projects/[^/]+/notificationChannelDescriptors/[^/]+$", "required": true, @@ -810,7 +1356,7 @@ ], "parameters": { "name": { - "description": "Required. The REST resource name of the parent from which to retrieve the notification channel descriptors. The expected syntax is:\nprojects/[PROJECT_ID_OR_NUMBER]\nNote that this names the parent container in which to look for the descriptors; to retrieve a single descriptor by name, use the GetNotificationChannelDescriptor operation, instead.", + "description": "Required. The REST resource name of the parent from which to retrieve the notification channel descriptors. The expected syntax is: projects/[PROJECT_ID_OR_NUMBER] Note that this names (https://cloud.google.com/monitoring/api/v3#project_name) the parent container in which to look for the descriptors; to retrieve a single descriptor by name, use the GetNotificationChannelDescriptor operation, instead.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -843,7 +1389,7 @@ "notificationChannels": { "methods": { "create": { - "description": "Creates a new notification channel, representing a single notification endpoint such as an email address, SMS number, or PagerDuty service.", + "description": "Creates a new notification channel, representing a single notification endpoint such as an email address, SMS number, or PagerDuty service.Design your application to single-thread API calls that modify the state of notification channels in a single project. This includes calls to CreateNotificationChannel, DeleteNotificationChannel and UpdateNotificationChannel.", "flatPath": "v3/projects/{projectsId}/notificationChannels", "httpMethod": "POST", "id": "monitoring.projects.notificationChannels.create", @@ -852,7 +1398,7 @@ ], "parameters": { "name": { - "description": "Required. The project on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\nThis names the container into which the channel will be written, this does not name the newly created channel. The resulting channel's name will have a normalized version of this field as a prefix, but will add /notificationChannels/[CHANNEL_ID] to identify the channel.", + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] This names the container into which the channel will be written; it does not name the newly created channel. The resulting channel's name will have a normalized version of this field as a prefix, but will add /notificationChannels/[CHANNEL_ID] to identify the channel.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -872,7 +1418,7 @@ ] }, "delete": { - "description": "Deletes a notification channel.", + "description": "Deletes a notification channel.Design your application to single-thread API calls that modify the state of notification channels in a single project. This includes calls to CreateNotificationChannel, DeleteNotificationChannel and UpdateNotificationChannel.", "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}", "httpMethod": "DELETE", "id": "monitoring.projects.notificationChannels.delete", @@ -886,7 +1432,7 @@ "type": "boolean" }, "name": { - "description": "Required. The channel for which to execute the request. 
The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]\n", + "description": "Required. The channel for which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] ", "location": "path", "pattern": "^projects/[^/]+/notificationChannels/[^/]+$", "required": true, @@ -912,7 +1458,7 @@ ], "parameters": { "name": { - "description": "Required. The channel for which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]\n", + "description": "Required. The channel for which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] ", "location": "path", "pattern": "^projects/[^/]+/notificationChannels/[^/]+$", "required": true, @@ -973,7 +1519,7 @@ "type": "string" }, "name": { - "description": "Required. The project on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\nThis names the container in which to look for the notification channels; it does not name a specific channel. To query a specific channel by REST resource name, use the GetNotificationChannel operation.", + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] This names the container in which to look for the notification channels; it does not name a specific channel. To query a specific channel by REST resource name, use the GetNotificationChannel operation.", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -1007,7 +1553,7 @@ ] }, "patch": { - "description": "Updates a notification channel. Fields not specified in the field mask remain unchanged.", + "description": "Updates a notification channel. Fields not specified in the field mask remain unchanged.Design your application to single-thread API calls that modify the state of notification channels in a single project. This includes calls to CreateNotificationChannel, DeleteNotificationChannel and UpdateNotificationChannel.", "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}", "httpMethod": "PATCH", "id": "monitoring.projects.notificationChannels.patch", @@ -1016,7 +1562,7 @@ ], "parameters": { "name": { - "description": "The full REST resource name for this channel. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]\nThe [CHANNEL_ID] is automatically assigned by the server on creation.", + "description": "The full REST resource name for this channel. The format is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] The [CHANNEL_ID] is automatically assigned by the server on creation.", "location": "path", "pattern": "^projects/[^/]+/notificationChannels/[^/]+$", "required": true, @@ -1101,6 +1647,144 @@ } } }, + "snoozes": { + "methods": { + "create": { + "description": "Creates a Snooze that will prevent alerts, which match the provided criteria, from being opened. The Snooze applies for a specific time interval.", + "flatPath": "v3/projects/{projectsId}/snoozes", + "httpMethod": "POST", + "id": "monitoring.projects.snoozes.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) in which a Snooze should be created. 
The format is: projects/[PROJECT_ID_OR_NUMBER] ", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v3/{+parent}/snoozes", + "request": { + "$ref": "Snooze" + }, + "response": { + "$ref": "Snooze" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring" + ] + }, + "get": { + "description": "Retrieves a Snooze by name.", + "flatPath": "v3/projects/{projectsId}/snoozes/{snoozesId}", + "httpMethod": "GET", + "id": "monitoring.projects.snoozes.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The ID of the Snooze to retrieve. The format is: projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] ", + "location": "path", + "pattern": "^projects/[^/]+/snoozes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v3/{+name}", + "response": { + "$ref": "Snooze" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read" + ] + }, + "list": { + "description": "Lists the Snoozes associated with a project. Can optionally pass in filter, which specifies predicates to match Snoozes.", + "flatPath": "v3/projects/{projectsId}/snoozes", + "httpMethod": "GET", + "id": "monitoring.projects.snoozes.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Optional. Optional filter to restrict results to the given criteria. The following fields are supported. interval.start_time interval.end_timeFor example: ``` interval.start_time \u003e \"2022-03-11T00:00:00-08:00\" AND interval.end_time \u003c \"2022-03-12T00:00:00-08:00\" ``` ", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Optional. The maximum number of results to return for a single query. The server may further constrain the maximum number of results returned in a single page. The value should be in the range 1, 1000. If the value given is outside this range, the server will decide the number of results to be returned.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The next_page_token from a previous call to ListSnoozesRequest to get the next page of results.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) whose Snoozes should be listed. The format is: projects/[PROJECT_ID_OR_NUMBER] ", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v3/{+parent}/snoozes", + "response": { + "$ref": "ListSnoozesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read" + ] + }, + "patch": { + "description": "Updates a Snooze, identified by its name, with the parameters in the given Snooze object.", + "flatPath": "v3/projects/{projectsId}/snoozes/{snoozesId}", + "httpMethod": "PATCH", + "id": "monitoring.projects.snoozes.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the Snooze. 
The format is: projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] The ID of the Snooze will be generated by the system.", "location": "path", "pattern": "^projects/[^/]+/snoozes/[^/]+$", "required": true, "type": "string" }, "updateMask": { "description": "Required. The fields to update.For each field listed in update_mask: If the Snooze object supplied in the UpdateSnoozeRequest has a value for that field, the value of the field in the existing Snooze will be set to the value of the field in the supplied Snooze. If the field does not have a value in the supplied Snooze, the field in the existing Snooze is set to its default value.Fields not listed retain their existing value.The following are the field names that are accepted in update_mask: display_name interval.start_time interval.end_timeThat said, the start time and end time of the Snooze determine which fields can legally be updated. Before attempting an update, users should consult the documentation for UpdateSnoozeRequest, which talks about which fields can be updated.", "format": "google-fieldmask", "location": "query", "type": "string" } }, "path": "v3/{+name}", "request": { "$ref": "Snooze" }, "response": { "$ref": "Snooze" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/monitoring" ] } } }, "timeSeries": { "methods": { "create": { @@ -1113,7 +1797,7 @@ ], "parameters": { "name": { - "description": "Required. The project on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] ", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -1133,8 +1817,38 @@ "https://www.googleapis.com/auth/monitoring.write" ] }, + "createService": { + "description": "Creates or adds data to one or more service time series. A service time series is a time series for a metric from a Google Cloud service. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response. This endpoint rejects writes to user-defined metrics. This method is only for use by Google Cloud services. Use projects.timeSeries.create instead.", + "flatPath": "v3/projects/{projectsId}/timeSeries:createService", + "httpMethod": "POST", + "id": "monitoring.projects.timeSeries.createService", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] ", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v3/{+name}/timeSeries:createService", + "request": { + "$ref": "CreateTimeSeriesRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.write" + ] + }, "list": { - "description": "Lists time series that match a filter. 
This method does not require a Workspace.", + "description": "Lists time series that match a filter.", "flatPath": "v3/projects/{projectsId}/timeSeries", "httpMethod": "GET", "id": "monitoring.projects.timeSeries.list", @@ -1143,7 +1857,7 @@ ], "parameters": { "aggregation.alignmentPeriod": { - "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds, at most 104 weeks. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.", + "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for alerting policies.", "format": "google-duration", "location": "query", "type": "string" @@ -1166,6 +1880,22 @@ "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05" ], + "enumDescriptions": [ + "No cross-time series reduction. The output of the Aligner is returned.", + "Reduce by computing the mean value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + "Reduce by computing the minimum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the maximum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the sum across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric and distribution values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the standard deviation across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + "Reduce by computing the number of data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of numeric, Boolean, distribution, and string value_type. The value_type of the output is INT64.", + "Reduce by computing the number of True-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + "Reduce by computing the number of False-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. 
The value_type of the output is INT64.", + "Reduce by computing the ratio of the number of True-valued data points to the total number of data points for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + "Reduce by computing the 99th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 95th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 50th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 5th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE." + ], "location": "query", "type": "string" }, @@ -1198,11 +1928,32 @@ "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE" ], + "enumDescriptions": [ + "No alignment. Raw data is returned. Not valid if cross-series reduction is requested. The value_type of the result is the same as the value_type of the input.", + "Align and convert to DELTA. The output is delta = y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The value_type of the aligned result is the same as the value_type of the input.", + "Align and convert to a rate. The result is computed as rate = (y1 - y0)/(t1 - t0), or \"delta over time\". Think of this aligner as providing the slope of the line that passes through the value at the start and at the end of the alignment_period.This aligner is valid for CUMULATIVE and DELTA metrics with numeric values. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The output is a GAUGE metric with value_type DOUBLE.If, by \"rate\", you mean \"percentage change\", see the ALIGN_PERCENT_CHANGE aligner instead.", + "Align by interpolating between adjacent points around the alignment period boundary. This aligner is valid for GAUGE metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align by moving the most recent data point before the end of the alignment period to the boundary at the end of the alignment period. This aligner is valid for GAUGE metrics. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the minimum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the maximum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. 
The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the mean value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is DOUBLE.", + "Align the time series by returning the number of values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric or Boolean values. The value_type of the aligned result is INT64.", + "Align the time series by returning the sum of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric and distribution values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the standard deviation of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the output is DOUBLE.", + "Align the time series by returning the number of True values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + "Align the time series by returning the number of False values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + "Align the time series by returning the ratio of the number of True values to the total number of values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 99th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 95th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 50th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 5th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align and convert to a percentage change. This aligner is valid for GAUGE and DELTA metrics with numeric values. This alignment returns ((current - previous)/previous) * 100, where the value of previous is determined based on the alignment_period.If the values of current and previous are both 0, then the returned value is 0. If only previous is 0, the returned value is infinity.A 10-minute moving mean is computed at each point of the alignment period prior to the above calculation to smooth the metric and prevent false positives from very short-lived spikes. 
The moving mean is only applicable for data whose values are \u003e= 0. Any values \u003c 0 are treated as a missing datapoint, and are ignored. While DELTA metrics are accepted by this alignment, special care should be taken that the values for the metric will always be positive. The output is a GAUGE metric with value_type DOUBLE." + ], "location": "query", "type": "string" }, "filter": { - "description": "Required. A monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that specifies which time series should be returned. The filter must specify a single metric type, and can additionally specify metric labels and other information. For example:\nmetric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND\n metric.labels.instance_name = \"my-instance-name\"\n", + "description": "Required. A monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that specifies which time series should be returned. The filter must specify a single metric type, and can additionally specify metric labels and other information. For example: metric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND metric.labels.instance_name = \"my-instance-name\" ", "location": "query", "type": "string" }, @@ -1219,7 +1970,7 @@ "type": "string" }, "name": { - "description": "Required. The project on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name), organization or folder on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] organizations/[ORGANIZATION_ID] folders/[FOLDER_ID] ", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -1241,12 +1992,112 @@ "location": "query", "type": "string" }, + "secondaryAggregation.alignmentPeriod": { + "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for alerting policies.", + "format": "google-duration", + "location": "query", + "type": "string" + }, + "secondaryAggregation.crossSeriesReducer": { + "description": "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. 
An alignment_period must also be specified; otherwise, an error is returned.", + "enum": [ + "REDUCE_NONE", + "REDUCE_MEAN", + "REDUCE_MIN", + "REDUCE_MAX", + "REDUCE_SUM", + "REDUCE_STDDEV", + "REDUCE_COUNT", + "REDUCE_COUNT_TRUE", + "REDUCE_COUNT_FALSE", + "REDUCE_FRACTION_TRUE", + "REDUCE_PERCENTILE_99", + "REDUCE_PERCENTILE_95", + "REDUCE_PERCENTILE_50", + "REDUCE_PERCENTILE_05" + ], + "enumDescriptions": [ + "No cross-time series reduction. The output of the Aligner is returned.", + "Reduce by computing the mean value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + "Reduce by computing the minimum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the maximum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the sum across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric and distribution values. The value_type of the output is the same as the value_type of the input.", + "Reduce by computing the standard deviation across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + "Reduce by computing the number of data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of numeric, Boolean, distribution, and string value_type. The value_type of the output is INT64.", + "Reduce by computing the number of True-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + "Reduce by computing the number of False-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + "Reduce by computing the ratio of the number of True-valued data points to the total number of data points for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + "Reduce by computing the 99th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 95th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 50th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + "Reduce by computing the 5th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. 
This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE." + ], + "location": "query", + "type": "string" + }, + "secondaryAggregation.groupByFields": { + "description": "The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.", + "location": "query", + "repeated": true, + "type": "string" + }, + "secondaryAggregation.perSeriesAligner": { + "description": "An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.", + "enum": [ + "ALIGN_NONE", + "ALIGN_DELTA", + "ALIGN_RATE", + "ALIGN_INTERPOLATE", + "ALIGN_NEXT_OLDER", + "ALIGN_MIN", + "ALIGN_MAX", + "ALIGN_MEAN", + "ALIGN_COUNT", + "ALIGN_SUM", + "ALIGN_STDDEV", + "ALIGN_COUNT_TRUE", + "ALIGN_COUNT_FALSE", + "ALIGN_FRACTION_TRUE", + "ALIGN_PERCENTILE_99", + "ALIGN_PERCENTILE_95", + "ALIGN_PERCENTILE_50", + "ALIGN_PERCENTILE_05", + "ALIGN_PERCENT_CHANGE" + ], + "enumDescriptions": [ + "No alignment. Raw data is returned. Not valid if cross-series reduction is requested. The value_type of the result is the same as the value_type of the input.", + "Align and convert to DELTA. The output is delta = y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The value_type of the aligned result is the same as the value_type of the input.", + "Align and convert to a rate. The result is computed as rate = (y1 - y0)/(t1 - t0), or \"delta over time\". Think of this aligner as providing the slope of the line that passes through the value at the start and at the end of the alignment_period.This aligner is valid for CUMULATIVE and DELTA metrics with numeric values. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. 
The output is a GAUGE metric with value_type DOUBLE.If, by \"rate\", you mean \"percentage change\", see the ALIGN_PERCENT_CHANGE aligner instead.", + "Align by interpolating between adjacent points around the alignment period boundary. This aligner is valid for GAUGE metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align by moving the most recent data point before the end of the alignment period to the boundary at the end of the alignment period. This aligner is valid for GAUGE metrics. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the minimum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the maximum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the mean value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is DOUBLE.", + "Align the time series by returning the number of values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric or Boolean values. The value_type of the aligned result is INT64.", + "Align the time series by returning the sum of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric and distribution values. The value_type of the aligned result is the same as the value_type of the input.", + "Align the time series by returning the standard deviation of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the output is DOUBLE.", + "Align the time series by returning the number of True values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + "Align the time series by returning the number of False values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + "Align the time series by returning the ratio of the number of True values to the total number of values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 99th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 95th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 50th percentile of all data points in the period. 
This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 5th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + "Align and convert to a percentage change. This aligner is valid for GAUGE and DELTA metrics with numeric values. This alignment returns ((current - previous)/previous) * 100, where the value of previous is determined based on the alignment_period.If the values of current and previous are both 0, then the returned value is 0. If only previous is 0, the returned value is infinity.A 10-minute moving mean is computed at each point of the alignment period prior to the above calculation to smooth the metric and prevent false positives from very short-lived spikes. The moving mean is only applicable for data whose values are \u003e= 0. Any values \u003c 0 are treated as a missing datapoint, and are ignored. While DELTA metrics are accepted by this alignment, special care should be taken that the values for the metric will always be positive. The output is a GAUGE metric with value_type DOUBLE." + ], + "location": "query", + "type": "string" + }, "view": { "description": "Required. Specifies which information is returned about the time series.", "enum": [ "FULL", "HEADERS" ], + "enumDescriptions": [ + "Returns the identity of the metric(s), the time series, and the time series data.", + "Returns the identity of the metric and the time series resource, but not the time series data." + ], "location": "query", "type": "string" } @@ -1262,7 +2113,7 @@ ] }, "query": { - "description": "Queries time series using Monitoring Query Language. This method does not require a Workspace.", + "description": "Queries time series using Monitoring Query Language.", "flatPath": "v3/projects/{projectsId}/timeSeries:query", "httpMethod": "POST", "id": "monitoring.projects.timeSeries.query", @@ -1271,7 +2122,7 @@ ], "parameters": { "name": { - "description": "Required. The project on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] ", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -1305,7 +2156,7 @@ ], "parameters": { "parent": { - "description": "Required. The project in which to create the Uptime check. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) in which to create the Uptime check. The format is: projects/[PROJECT_ID_OR_NUMBER] ", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -1334,7 +2185,7 @@ ], "parameters": { "name": { - "description": "Required. The Uptime check configuration to delete. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]\n", + "description": "Required. The Uptime check configuration to delete. 
The format is: projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] ", "location": "path", "pattern": "^projects/[^/]+/uptimeCheckConfigs/[^/]+$", "required": true, @@ -1360,7 +2211,7 @@ ], "parameters": { "name": { - "description": "Required. The Uptime check configuration to retrieve. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]\n", + "description": "Required. The Uptime check configuration to retrieve. The format is: projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] ", "location": "path", "pattern": "^projects/[^/]+/uptimeCheckConfigs/[^/]+$", "required": true, @@ -1386,6 +2237,11 @@ "parent" ], "parameters": { + "filter": { + "description": "If provided, this field specifies the criteria that must be met by uptime checks to be included in the response.For more details, see Filtering syntax (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering#filter_syntax).", + "location": "query", + "type": "string" + }, "pageSize": { "description": "The maximum number of results to return in a single response. The server may further constrain the maximum number of results returned in a single page. If the page_size is \u003c=0, the server will decide the number of results to be returned.", "format": "int32", @@ -1398,7 +2254,7 @@ "type": "string" }, "parent": { - "description": "Required. The project whose Uptime check configurations are listed. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) whose Uptime check configurations are listed. The format is: projects/[PROJECT_ID_OR_NUMBER] ", "location": "path", "pattern": "^projects/[^/]+$", "required": true, @@ -1425,7 +2281,7 @@ ], "parameters": { "name": { - "description": "A unique resource name for this Uptime check configuration. The format is:\n projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]\nThis field should be omitted when creating the Uptime check configuration; on create, the resource name is assigned by the server and included in the response.", + "description": "A unique resource name for this Uptime check configuration. The format is: projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] [PROJECT_ID_OR_NUMBER] is the Workspace host project associated with the Uptime check.This field should be omitted when creating the Uptime check configuration; on create, the resource name is assigned by the server and included in the response.", "location": "path", "pattern": "^projects/[^/]+/uptimeCheckConfigs/[^/]+$", "required": true, @@ -1466,7 +2322,7 @@ ], "parameters": { "parent": { - "description": "Required. Resource name of the parent workspace. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + "description": "Required. Resource name (https://cloud.google.com/monitoring/api/v3#project_name) of the parent Metrics Scope. The format is: projects/[PROJECT_ID_OR_NUMBER] ", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -1500,7 +2356,7 @@ ], "parameters": { "name": { - "description": "Required. Resource name of the Service to delete. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]\n", + "description": "Required. Resource name of the Service to delete. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] ", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+$", "required": true, @@ -1526,7 +2382,7 @@ ], "parameters": { "name": { - "description": "Required. 
Resource name of the Service. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]\n", + "description": "Required. Resource name of the Service. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] ", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+$", "required": true, @@ -1544,7 +2400,7 @@ ] }, "list": { - "description": "List Services for this workspace.", + "description": "List Services for this Metrics Scope.", "flatPath": "v3/{v3Id}/{v3Id1}/services", "httpMethod": "GET", "id": "monitoring.services.list", @@ -1553,7 +2409,7 @@ ], "parameters": { "filter": { - "description": "A filter specifying what Services to return. The filter currently supports the following fields:\n- `identifier_case`\n- `app_engine.module_id`\n- `cloud_endpoints.service`\n- `mesh_istio.mesh_uid`\n- `mesh_istio.service_namespace`\n- `mesh_istio.service_name`\n- `cluster_istio.location` (deprecated)\n- `cluster_istio.cluster_name` (deprecated)\n- `cluster_istio.service_namespace` (deprecated)\n- `cluster_istio.service_name` (deprecated)\nidentifier_case refers to which option in the identifier oneof is populated. For example, the filter identifier_case = \"CUSTOM\" would match all services with a value for the custom field. Valid options are \"CUSTOM\", \"APP_ENGINE\", \"CLOUD_ENDPOINTS\", \"MESH_ISTIO\", and \"CLUSTER_ISTIO\" (deprecated),", + "description": "A filter specifying what Services to return. The filter supports filtering on a particular service-identifier type or one of its attributes.To filter on a particular service-identifier type, the identifier_case refers to which option in the identifier field is populated. For example, the filter identifier_case = \"CUSTOM\" would match all services with a value for the custom field. Valid options include \"CUSTOM\", \"APP_ENGINE\", \"MESH_ISTIO\", and the other options listed at https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service. To filter on an attribute of a service-identifier type, apply the filter name by using the snake case of the service-identifier type and the attribute of that service-identifier type, and join the two with a period. For example, to filter by the meshUid field of the MeshIstio service-identifier type, you must filter on mesh_istio.mesh_uid = \"123\" to match all services with mesh UID \"123\". Service-identifier types and their attributes are described at https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service", "location": "query", "type": "string" }, @@ -1569,7 +2425,7 @@ "type": "string" }, "parent": { - "description": "Required. Resource name of the parent containing the listed services, either a project or a Monitoring Workspace. The formats are:\nprojects/[PROJECT_ID_OR_NUMBER]\nworkspaces/[HOST_PROJECT_ID_OR_NUMBER]\n", + "description": "Required. Resource name of the parent containing the listed services, either a project (https://cloud.google.com/monitoring/api/v3#project_name) or a Monitoring Metrics Scope. The formats are: projects/[PROJECT_ID_OR_NUMBER] workspaces/[HOST_PROJECT_ID_OR_NUMBER] ", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -1596,7 +2452,7 @@ ], "parameters": { "name": { - "description": "Resource name for this Service. 
The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] ", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+$", "required": true, @@ -1635,7 +2491,7 @@ ], "parameters": { "parent": { - "description": "Required. Resource name of the parent Service. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]\n", + "description": "Required. Resource name of the parent Service. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] ", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+$", "required": true, @@ -1669,7 +2525,7 @@ ], "parameters": { "name": { - "description": "Required. Resource name of the ServiceLevelObjective to delete. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]\n", + "description": "Required. Resource name of the ServiceLevelObjective to delete. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] ", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+/serviceLevelObjectives/[^/]+$", "required": true, @@ -1695,7 +2551,7 @@ ], "parameters": { "name": { - "description": "Required. Resource name of the ServiceLevelObjective to get. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]\n", + "description": "Required. Resource name of the ServiceLevelObjective to get. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] ", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+/serviceLevelObjectives/[^/]+$", "required": true, @@ -1708,6 +2564,11 @@ "FULL", "EXPLICIT" ], + "enumDescriptions": [ + "Same as FULL.", + "Return the embedded ServiceLevelIndicator in the form in which it was defined. If it was defined using a BasicSli, return that BasicSli.", + "For ServiceLevelIndicators using BasicSli articulation, instead return the ServiceLevelIndicator with its mode of computation fully spelled out as a RequestBasedSli. For ServiceLevelIndicators using RequestBasedSli or WindowsBasedSli, return the ServiceLevelIndicator as it was provided." + ], "location": "query", "type": "string" } @@ -1748,7 +2609,7 @@ "type": "string" }, "parent": { - "description": "Required. Resource name of the parent containing the listed SLOs, either a project or a Monitoring Workspace. The formats are:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]\nworkspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/-\n", + "description": "Required. Resource name of the parent containing the listed SLOs, either a project or a Monitoring Metrics Scope. The formats are: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/- ", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+$", "required": true, @@ -1761,6 +2622,11 @@ "FULL", "EXPLICIT" ], + "enumDescriptions": [ + "Same as FULL.", + "Return the embedded ServiceLevelIndicator in the form in which it was defined. If it was defined using a BasicSli, return that BasicSli.", + "For ServiceLevelIndicators using BasicSli articulation, instead return the ServiceLevelIndicator with its mode of computation fully spelled out as a RequestBasedSli. For ServiceLevelIndicators using RequestBasedSli or WindowsBasedSli, return the ServiceLevelIndicator as it was provided." + ], "location": "query", "type": "string" } @@ -1785,7 +2651,7 @@ ], "parameters": { "name": { - "description": "Resource name for this ServiceLevelObjective. 
The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]\n", + "description": "Resource name for this ServiceLevelObjective. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] ", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+/serviceLevelObjectives/[^/]+$", "required": true, @@ -1848,7 +2714,7 @@ } } }, - "revision": "20200728", + "revision": "20230212", "rootUrl": "https://monitoring.googleapis.com/", "schemas": { "Aggregation": { @@ -1856,7 +2722,7 @@ "id": "Aggregation", "properties": { "alignmentPeriod": { - "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds, at most 104 weeks. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.", + "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for alerting policies.", "format": "google-duration", "type": "string" }, @@ -1956,6 +2822,10 @@ "description": "A description of the conditions under which some aspect of your system is considered to be \"unhealthy\" and the ways to notify people or services about this state. For an overview of alert policies, see Introduction to Alerting (https://cloud.google.com/monitoring/alerts/).", "id": "AlertPolicy", "properties": { + "alertStrategy": { + "$ref": "AlertStrategy", + "description": "Control over how this alert policy's notification channels are notified." + }, "combiner": { "description": "How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED.", "enum": [ @@ -2000,11 +2870,11 @@ "description": "A read-only record of the most recent change to the alerting policy. If provided in a call to create or update, this field will be ignored." }, "name": { - "description": "Required if the policy exists. The resource name for this policy. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]\n[ALERT_POLICY_ID] is assigned by Stackdriver Monitoring when the policy is created. When calling the alertPolicies.create method, do not include the name field in the alerting policy passed as part of the request.", + "description": "Required if the policy exists. The resource name for this policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] [ALERT_POLICY_ID] is assigned by Cloud Monitoring when the policy is created. 
When calling the alertPolicies.create method, do not include the name field in the alerting policy passed as part of the request.", "type": "string" }, "notificationChannels": { - "description": "Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the ListNotificationChannels method. The format of the entries in this field is:\nprojects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]\n", + "description": "Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the ListNotificationChannels method. The format of the entries in this field is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] ", "items": { "type": "string" }, @@ -2024,12 +2894,28 @@ }, "type": "object" }, + "AlertStrategy": { + "description": "Control over how the notification channels in notification_channels are notified when this alert fires.", + "id": "AlertStrategy", + "properties": { + "autoClose": { + "description": "If an alert policy that was active has no data for this long, any open incidents will close", + "format": "google-duration", + "type": "string" + }, + "notificationRateLimit": { + "$ref": "NotificationRateLimit", + "description": "Required for alert policies with a LogMatch condition.This limit is not implemented for alert policies that are not log-based." + } + }, + "type": "object" + }, "AppEngine": { "description": "App Engine service. Learn more at https://cloud.google.com/appengine.", "id": "AppEngine", "properties": { "moduleId": { - "description": "The ID of the App Engine module underlying this service. Corresponds to the module_id resource label in the gae_app monitored resource: https://cloud.google.com/monitoring/api/resources#tag_gae_app", + "description": "The ID of the App Engine module underlying this service. Corresponds to the module_id resource label in the gae_app monitored resource (https://cloud.google.com/monitoring/api/resources#tag_gae_app).", "type": "string" } }, @@ -2056,6 +2942,24 @@ }, "type": "object" }, + "BasicService": { + "description": "A well-known service type, defined by its service type and service labels. Documentation and examples here (https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli).", + "id": "BasicService", + "properties": { + "serviceLabels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels that specify the resource that emits the monitoring data which is used for SLO reporting of this Service. Documentation and valid values for given service types here (https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli).", + "type": "object" + }, + "serviceType": { + "description": "The type of service that this basic service defines, e.g. APP_ENGINE service type. 
Documentation and valid values here (https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli).", + "type": "string" + } + }, + "type": "object" + }, "BasicSli": { "description": "An SLI measuring performance on a well-known service type. Performance will be computed on the basis of pre-defined metrics. The type of the service_resource determines the metrics to use and the service_resource.labels and metric_labels are used to construct a monitoring filter to filter that metric down to just the data relevant to this service.", "id": "BasicSli", @@ -2116,14 +3020,29 @@ "id": "CloudEndpoints", "properties": { "service": { - "description": "The name of the Cloud Endpoints service underlying this service. Corresponds to the service resource label in the api monitored resource: https://cloud.google.com/monitoring/api/resources#tag_api", + "description": "The name of the Cloud Endpoints service underlying this service. Corresponds to the service resource label in the api monitored resource (https://cloud.google.com/monitoring/api/resources#tag_api).", + "type": "string" + } + }, + "type": "object" + }, + "CloudRun": { + "description": "Cloud Run service. Learn more at https://cloud.google.com/run.", + "id": "CloudRun", + "properties": { + "location": { + "description": "The location the service is run. Corresponds to the location resource label in the cloud_run_revision monitored resource (https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision).", + "type": "string" + }, + "serviceName": { + "description": "The name of the Cloud Run service. Corresponds to the service_name resource label in the cloud_run_revision monitored resource (https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision).", "type": "string" } }, "type": "object" }, "ClusterIstio": { - "description": "Istio service scoped to a single Kubernetes cluster. Learn more at http://istio.io.", + "description": "Istio service scoped to a single Kubernetes cluster. Learn more at https://istio.io. Clusters running OSS Istio will have their services ingested as this type.", "id": "ClusterIstio", "properties": { "clusterName": { @@ -2183,7 +3102,7 @@ "type": "string" }, "values": { - "description": "The measured values during this time interval. Each value must have a different dataSourceName.", + "description": "The measured values during this time interval. Each value must have a different data_source_name.", "items": { "$ref": "CollectdValue" }, @@ -2220,7 +3139,7 @@ "id": "CollectdValue", "properties": { "dataSourceName": { - "description": "The data source for the collectd value. For example there are two data sources for network measurements: \"rx\" and \"tx\".", + "description": "The data source for the collectd value. For example, there are two data sources for network measurements: \"rx\" and \"tx\".", "type": "string" }, "dataSourceType": { @@ -2272,6 +3191,14 @@ "$ref": "MetricAbsence", "description": "A condition that checks that a time series continues to receive new data points." }, + "conditionMatchedLog": { + "$ref": "LogMatch", + "description": "A condition that checks for log messages matching given constraints. If set, no other conditions can be present." + }, + "conditionMonitoringQueryLanguage": { + "$ref": "MonitoringQueryLanguageCondition", + "description": "A condition that uses the Monitoring Query Language to define alerts." 
+ }, "conditionThreshold": { "$ref": "MetricThreshold", "description": "A condition that compares a time series against a threshold." @@ -2281,20 +3208,24 @@ "type": "string" }, "name": { - "description": "Required if the condition exists. The unique resource name for this condition. Its format is:\nprojects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]\n[CONDITION_ID] is assigned by Stackdriver Monitoring when the condition is created as part of a new or updated alerting policy.When calling the alertPolicies.create method, do not include the name field in the conditions of the requested alerting policy. Stackdriver Monitoring creates the condition identifiers and includes them in the new policy.When calling the alertPolicies.update method to update a policy, including a condition name causes the existing condition to be updated. Conditions without names are added to the updated policy. Existing conditions are deleted if they are not updated.Best practice is to preserve [CONDITION_ID] if you make only small changes, such as those to condition thresholds, durations, or trigger values. Otherwise, treat the change as a new condition and let the existing condition be deleted.", + "description": "Required if the condition exists. The unique resource name for this condition. Its format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] [CONDITION_ID] is assigned by Cloud Monitoring when the condition is created as part of a new or updated alerting policy.When calling the alertPolicies.create method, do not include the name field in the conditions of the requested alerting policy. Cloud Monitoring creates the condition identifiers and includes them in the new policy.When calling the alertPolicies.update method to update a policy, including a condition name causes the existing condition to be updated. Conditions without names are added to the updated policy. Existing conditions are deleted if they are not updated.Best practice is to preserve [CONDITION_ID] if you make only small changes, such as those to condition thresholds, durations, or trigger values. Otherwise, treat the change as a new condition and let the existing condition be deleted.", "type": "string" } }, "type": "object" }, "ContentMatcher": { - "description": "Optional. Used to perform content matching. This allows matching based on substrings and regular expressions, together with their negations. Only the first 4\u0026nbsp;MB of an HTTP or HTTPS check's response (and the first 1\u0026nbsp;MB of a TCP check's response) are examined for purposes of content matching.", + "description": "Optional. Used to perform content matching. This allows matching based on substrings and regular expressions, together with their negations. Only the first 4 MB of an HTTP or HTTPS check's response (and the first 1 MB of a TCP check's response) are examined for purposes of content matching.", "id": "ContentMatcher", "properties": { "content": { - "description": "String or regex content to match. Maximum 1024 bytes. An empty content string indicates no content matching is to be performed.", + "description": "String, regex or JSON content to match. Maximum 1024 bytes. 
An empty content string indicates no content matching is to be performed.", "type": "string" }, + "jsonPathMatcher": { + "$ref": "JsonPathMatcher", + "description": "Matcher information for MATCHES_JSON_PATH and NOT_MATCHES_JSON_PATH" + }, "matcher": { "description": "The type of content matcher that will be applied to the server output, compared to the content string when the check is run.", "enum": [ @@ -2302,14 +3233,18 @@ "CONTAINS_STRING", "NOT_CONTAINS_STRING", "MATCHES_REGEX", - "NOT_MATCHES_REGEX" + "NOT_MATCHES_REGEX", + "MATCHES_JSON_PATH", + "NOT_MATCHES_JSON_PATH" ], "enumDescriptions": [ "No content matcher type specified (maintained for backward compatibility, but deprecated for future use). Treated as CONTAINS_STRING.", "Selects substring matching. The match succeeds if the output contains the content string. This is the default value for checks without a matcher option, or where the value of matcher is CONTENT_MATCHER_OPTION_UNSPECIFIED.", "Selects negation of substring matching. The match succeeds if the output does NOT contain the content string.", - "Selects regular-expression matching. The match succeeds of the output matches the regular expression specified in the content string.", - "Selects negation of regular-expression matching. The match succeeds if the output does NOT match the regular expression specified in the content string." + "Selects regular-expression matching. The match succeeds if the output matches the regular expression specified in the content string. Regex matching is only supported for HTTP/HTTPS checks.", + "Selects negation of regular-expression matching. The match succeeds if the output does NOT match the regular expression specified in the content string. Regex matching is only supported for HTTP/HTTPS checks.", + "Selects JSONPath matching. See JsonPathMatcher for details on when the match succeeds. JSONPath matching is only supported for HTTP/HTTPS checks.", + "Selects JSONPath matching. See JsonPathMatcher for details on when the match succeeds. Succeeds when output does NOT match as specified. JSONPath is only supported for HTTP/HTTPS checks." ], "type": "string" } @@ -2394,8 +3329,22 @@ }, "type": "object" }, + "Criteria": { + "description": "Criteria specific to the AlertPolicys that this Snooze applies to. The Snooze will suppress alerts that come from one of the AlertPolicys whose names are supplied.", + "id": "Criteria", + "properties": { + "policies": { + "description": "The specific AlertPolicy names for the alert that should be snoozed. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] There is a limit of 16 policies per snooze. This limit is checked during snooze creation.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "Custom": { - "description": "Custom view of service telemetry. Currently a place-holder pending final design.", + "description": "Use a custom service to designate a service that you want to monitor when none of the other service types (like App Engine, Cloud Run, or a GKE type) matches your intended service.", "id": "Custom", "properties": {}, "type": "object" @@ -2438,7 +3387,7 @@ "description": "If specified, contains the range of the population values. The field must not be present if the count is zero. This field is presently ignored by the Cloud Monitoring API v3." }, "sumOfSquaredDeviation": { - "description": "The sum of squared deviations from the mean of the values in the population. 
For values x_i this is:\nSum[i=1..n]((x_i - mean)^2)\nKnuth, \"The Art of Computer Programming\", Vol. 2, page 323, 3rd edition describes Welford's method for accumulating this sum in one pass.If count is zero then this field must be zero.", + "description": "The sum of squared deviations from the mean of the values in the population. For values x_i this is: Sum[i=1..n]((x_i - mean)^2) Knuth, \"The Art of Computer Programming\", Vol. 2, page 232, 3rd edition describes Welford's method for accumulating this sum in one pass.If count is zero then this field must be zero.", "format": "double", "type": "number" } @@ -2446,11 +3395,11 @@ "type": "object" }, "DistributionCut": { - "description": "A DistributionCut defines a TimeSeries and thresholds used for measuring good service and total service. The TimeSeries must have ValueType =\nDISTRIBUTION and MetricKind = DELTA or MetricKind = CUMULATIVE. The computed good_service will be the count of values x in the Distribution such that range.min \u003c= x \u003c range.max.", + "description": "A DistributionCut defines a TimeSeries and thresholds used for measuring good service and total service. The TimeSeries must have ValueType = DISTRIBUTION and MetricKind = DELTA or MetricKind = CUMULATIVE. The computed good_service will be the estimated count of values in the Distribution that fall within the specified min and max.", "id": "DistributionCut", "properties": { "distributionFilter": { - "description": "A monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) specifying a TimeSeries aggregating values. Must have ValueType =\nDISTRIBUTION and MetricKind = DELTA or MetricKind = CUMULATIVE.", + "description": "A monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) specifying a TimeSeries aggregating values. Must have ValueType = DISTRIBUTION and MetricKind = DELTA or MetricKind = CUMULATIVE.", "type": "string" }, "range": { @@ -2465,7 +3414,7 @@ "id": "Documentation", "properties": { "content": { - "description": "The text of the documentation, interpreted according to mime_type. The content may not exceed 8,192 Unicode characters and may not exceed more than 10,240 bytes when encoded in UTF-8 format, whichever is smaller.", + "description": "The body of the documentation, interpreted according to mime_type. The content may not exceed 8,192 Unicode characters and may not exceed 10,240 bytes when encoded in UTF-8 format, whichever is smaller. This text can be templatized by using variables (https://cloud.google.com/monitoring/alerts/doc-variables).", "type": "string" }, "mimeType": { @@ -2490,7 +3439,7 @@ "type": "object" }, "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.", + "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } ", "id": "Empty", "properties": {}, "type": "object" @@ -2516,7 +3465,7 @@ "id": "Exemplar", "properties": { "attachments": { - "description": "Contextual information about the example value. 
Examples are:Trace: type.googleapis.com/google.monitoring.v3.SpanContextLiteral string: type.googleapis.com/google.protobuf.StringValueLabels dropped during aggregation: type.googleapis.com/google.monitoring.v3.DroppedLabelsThere may be only a single attachment of any given message type in a single exemplar, and this is enforced by the system.", + "description": "Contextual information about the example value. Examples are:Trace: type.googleapis.com/google.monitoring.v3.SpanContextLiteral string: type.googleapis.com/google.protobuf.StringValueLabels dropped during aggregation: type.googleapis.com/google.monitoring.v3.DroppedLabelsThere may be only a single attachment of any given message type in a single exemplar, and this is enforced by the system.", "items": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -2540,7 +3489,7 @@ "type": "object" }, "Explicit": { - "description": "Specifies a set of buckets with arbitrary widths.There are size(bounds) + 1 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): boundsi Lower bound (1 \u003c= i \u003c N); boundsi - 1The bounds field must contain at least one element. If bounds has only one element, then there are no finite buckets, and that single element is the common boundary of the overflow and underflow buckets.", + "description": "Specifies a set of buckets with arbitrary widths.There are size(bounds) + 1 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): boundsi Lower bound (1 \u003c= i \u003c N); boundsi - 1The bounds field must contain at least one element. If bounds has only one element, then there are no finite buckets, and that single element is the common boundary of the overflow and underflow buckets.", "id": "Explicit", "properties": { "bounds": { @@ -2555,7 +3504,7 @@ "type": "object" }, "Exponential": { - "description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): scale * (growth_factor ^ i). Lower bound (1 \u003c= i \u003c N): scale * (growth_factor ^ (i - 1)).", + "description": "Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): scale * (growth_factor ^ i). Lower bound (1 \u003c= i \u003c N): scale * (growth_factor ^ (i - 1)).", "id": "Exponential", "properties": { "growthFactor": { @@ -2682,6 +3631,18 @@ }, "type": "object" }, + "ForecastOptions": { + "description": "Options used when forecasting the time series and testing the predicted value against the threshold.", + "id": "ForecastOptions", + "properties": { + "forecastHorizon": { + "description": "Required. The length of time into the future to forecast whether a time series will violate the threshold. 
If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing.", + "format": "google-duration", + "type": "string" + } + }, + "type": "object" + }, "GetNotificationChannelVerificationCodeRequest": { "description": "The GetNotificationChannelVerificationCode request.", "id": "GetNotificationChannelVerificationCodeRequest", @@ -2710,8 +3671,92 @@ }, "type": "object" }, + "GkeNamespace": { + "description": "GKE Namespace. The field names correspond to the resource metadata labels on monitored resources that fall under a namespace (for example, k8s_container or k8s_pod).", + "id": "GkeNamespace", + "properties": { + "clusterName": { + "description": "The name of the parent cluster.", + "type": "string" + }, + "location": { + "description": "The location of the parent cluster. This may be a zone or region.", + "type": "string" + }, + "namespaceName": { + "description": "The name of this namespace.", + "type": "string" + }, + "projectId": { + "description": "Output only. The project this resource lives in. For legacy services migrated from the Custom type, this may be a distinct project from the one parenting the service itself.", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "GkeService": { + "description": "GKE Service. The \"service\" here represents a Kubernetes service object (https://kubernetes.io/docs/concepts/services-networking/service). The field names correspond to the resource labels on k8s_service monitored resources (https://cloud.google.com/monitoring/api/resources#tag_k8s_service).", + "id": "GkeService", + "properties": { + "clusterName": { + "description": "The name of the parent cluster.", + "type": "string" + }, + "location": { + "description": "The location of the parent cluster. This may be a zone or region.", + "type": "string" + }, + "namespaceName": { + "description": "The name of the parent namespace.", + "type": "string" + }, + "projectId": { + "description": "Output only. The project this resource lives in. For legacy services migrated from the Custom type, this may be a distinct project from the one parenting the service itself.", + "readOnly": true, + "type": "string" + }, + "serviceName": { + "description": "The name of this service.", + "type": "string" + } + }, + "type": "object" + }, + "GkeWorkload": { + "description": "A GKE Workload (Deployment, StatefulSet, etc). The field names correspond to the metadata labels on monitored resources that fall under a workload (for example, k8s_container or k8s_pod).", + "id": "GkeWorkload", + "properties": { + "clusterName": { + "description": "The name of the parent cluster.", + "type": "string" + }, + "location": { + "description": "The location of the parent cluster. This may be a zone or region.", + "type": "string" + }, + "namespaceName": { + "description": "The name of the parent namespace.", + "type": "string" + }, + "projectId": { + "description": "Output only. The project this resource lives in. 
For legacy services migrated from the Custom type, this may be a distinct project from the one parenting the service itself.", + "readOnly": true, + "type": "string" + }, + "topLevelControllerName": { + "description": "The name of this workload.", + "type": "string" + }, + "topLevelControllerType": { + "description": "The type of this workload (for example, \"Deployment\" or \"DaemonSet\")", + "type": "string" + } + }, + "type": "object" + }, "GoogleMonitoringV3Range": { - "description": "Range of numerical values, inclusive of min and exclusive of max. If the open range \"\u003c range.max\" is desired, set range.min = -infinity. If the open range \"\u003e= range.min\" is desired, set range.max = infinity.", + "description": "Range of numerical values within min and max.", "id": "GoogleMonitoringV3Range", "properties": { "max": { @@ -2744,11 +3789,11 @@ "type": "boolean" }, "name": { - "description": "Output only. The name of this group. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\nWhen creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique [GROUP_ID] that is generated automatically.", + "description": "Output only. The name of this group. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique [GROUP_ID] that is generated automatically.", "type": "string" }, "parentName": { - "description": "The name of the group's parent, if it has one. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\nFor groups with no parent, parent_name is the empty string, \"\".", + "description": "The name of the group's parent, if it has one. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] For groups with no parent, parent_name is the empty string, \"\".", "type": "string" } }, @@ -2758,27 +3803,40 @@ "description": "Information involved in an HTTP/HTTPS Uptime check request.", "id": "HttpCheck", "properties": { + "acceptedResponseStatusCodes": { + "description": "If present, the check will only pass if the HTTP response status code is in this set of status codes. If empty, the check will only pass if the HTTP status code is 200-299.", + "items": { + "$ref": "ResponseStatusCode" + }, + "type": "array" + }, "authInfo": { "$ref": "BasicAuthentication", "description": "The authentication information. Optional when creating an HTTP check; defaults to empty." }, "body": { - "description": "The request body associated with the HTTP POST request. If content_type is URL_ENCODED, the body passed in must be URL-encoded. Users can provide a Content-Length header via the headers field or the API will do so. If the request_method is GET and body is not empty, the API will return an error. The maximum byte size is 1 megabyte. Note: As with all bytes fields JSON representations are base64 encoded. e.g.: \"foo=bar\" in URL-encoded form is \"foo%3Dbar\" and in base64 encoding is \"Zm9vJTI1M0RiYXI=\".", + "description": "The request body associated with the HTTP POST request. If content_type is URL_ENCODED, the body passed in must be URL-encoded. Users can provide a Content-Length header via the headers field or the API will do so. If the request_method is GET and body is not empty, the API will return an error. 
The maximum byte size is 1 megabyte.Note: If client libraries aren't used (which perform the conversion automatically), base64 encode your body data since the field is of bytes type.", "format": "byte", "type": "string" }, "contentType": { - "description": "The content type to use for the check.", + "description": "The content type header to use for the check. The following configurations result in errors: 1. Content type is specified in both the headers field and the content_type field. 2. Request method is GET and content_type is not TYPE_UNSPECIFIED. 3. Request method is POST and content_type is TYPE_UNSPECIFIED. 4. Request method is POST and a \"Content-Type\" header is provided via headers field. The content_type field should be used instead.", "enum": [ "TYPE_UNSPECIFIED", - "URL_ENCODED" + "URL_ENCODED", + "USER_PROVIDED" ], "enumDescriptions": [ - "No content type specified. If the request method is POST, an unspecified content type results in a check creation rejection.", - "body is in URL-encoded form. Equivalent to setting the Content-Type to application/x-www-form-urlencoded in the HTTP request." + "No content type specified.", + "body is in URL-encoded form. Equivalent to setting the Content-Type to application/x-www-form-urlencoded in the HTTP request.", + "body is in custom_content_type form. Equivalent to setting the Content-Type to the contents of custom_content_type in the HTTP request." ], "type": "string" }, + "customContentType": { + "description": "A user provided content type header to use for the check. The invalid configurations outlined in the content_type field apply to custom_content_type, as well as the following: 1. content_type is URL_ENCODED and custom_content_type is set. 2. content_type is USER_PROVIDED and custom_content_type is not set.", + "type": "string" + }, "headers": { "additionalProperties": { "type": "string" }, @@ -2794,6 +3852,10 @@ "description": "Optional (defaults to \"/\"). The path to the page against which to run the check. Will be combined with the host (specified within the monitored_resource) and port to construct the full URL. If the provided path does not begin with \"/\", a \"/\" will be prepended automatically.", "type": "string" }, + "pingConfig": { + "$ref": "PingConfig", + "description": "Contains information needed to add pings to an HTTP check." + }, "port": { "description": "Optional (defaults to 80 when use_ssl is false, and 443 when use_ssl is true). The TCP port on the HTTP server against which to run the check. Will be combined with host (specified within the monitored_resource) and path to construct the full URL.", "format": "int32", @@ -2829,7 +3891,7 @@ "id": "InternalChecker", "properties": { "displayName": { - "description": "The checker's human-readable name. The display name should be unique within a Stackdriver Workspace in order to make it easier to identify; however, uniqueness is not enforced.", + "description": "The checker's human-readable name. The display name should be unique within a Cloud Monitoring Metrics Scope in order to make it easier to identify; however, uniqueness is not enforced.", "type": "string" }, "gcpZone": { @@ -2837,7 +3899,7 @@ "type": "string" }, "name": { - "description": "A unique resource name for this InternalChecker. 
The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/internalCheckers/[INTERNAL_CHECKER_ID]\n[PROJECT_ID_OR_NUMBER] is the Stackdriver Workspace project for the Uptime check config associated with the internal checker.", + "description": "A unique resource name for this InternalChecker. The format is: projects/[PROJECT_ID_OR_NUMBER]/internalCheckers/[INTERNAL_CHECKER_ID] [PROJECT_ID_OR_NUMBER] is the Cloud Monitoring Metrics Scope project for the Uptime check config associated with the internal checker.", "type": "string" }, "network": { @@ -2845,7 +3907,7 @@ "type": "string" }, "peerProjectId": { - "description": "The GCP project ID where the internal checker lives. Not necessary the same as the Workspace project.", + "description": "The GCP project ID where the internal checker lives. Not necessarily the same as the Metrics Scope project.", "type": "string" }, "state": { @@ -2865,6 +3927,50 @@ }, "type": "object" }, + "IstioCanonicalService": { + "description": "Canonical service scoped to an Istio mesh. Anthos clusters running ASM \u003e= 1.6.8 will have their services ingested as this type.", + "id": "IstioCanonicalService", + "properties": { + "canonicalService": { + "description": "The name of the canonical service underlying this service. Corresponds to the destination_canonical_service_name metric label in Istio metrics (https://cloud.google.com/monitoring/api/metrics_istio).", + "type": "string" + }, + "canonicalServiceNamespace": { + "description": "The namespace of the canonical service underlying this service. Corresponds to the destination_canonical_service_namespace metric label in Istio metrics (https://cloud.google.com/monitoring/api/metrics_istio).", + "type": "string" + }, + "meshUid": { + "description": "Identifier for the Istio mesh in which this canonical service is defined. Corresponds to the mesh_uid metric label in Istio metrics (https://cloud.google.com/monitoring/api/metrics_istio).", + "type": "string" + } + }, + "type": "object" + }, + "JsonPathMatcher": { + "description": "Information needed to perform a JSONPath content match. Used for ContentMatcherOption::MATCHES_JSON_PATH and ContentMatcherOption::NOT_MATCHES_JSON_PATH.", + "id": "JsonPathMatcher", + "properties": { + "jsonMatcher": { + "description": "The type of JSONPath match that will be applied to the JSON output (ContentMatcher.content)", + "enum": [ + "JSON_PATH_MATCHER_OPTION_UNSPECIFIED", + "EXACT_MATCH", + "REGEX_MATCH" + ], + "enumDescriptions": [ + "No JSONPath matcher type specified (not valid).", + "Selects 'exact string' matching. The match succeeds if the content at the json_path within the output is exactly the same as the content string.", + "Selects regular-expression matching. The match succeeds if the content at the json_path within the output matches the regular expression specified in the content string." + ], + "type": "string" + }, + "jsonPath": { + "description": "JSONPath within the response output pointing to the expected ContentMatcher::content to match against.", + "type": "string" + } + }, + "type": "object" + }, "LabelDescriptor": { "description": "A description of a label.", "id": "LabelDescriptor", "properties": { @@ -2874,7 +3980,7 @@ "type": "string" }, "key": { - "description": "The key for this label. 
The key must meet the following criteria:\nDoes not exceed 100 characters.\nMatches the following regular expression: [a-zA-Z][a-zA-Z0-9_]*\nThe first character must be an upper- or lower-case letter.\nThe remaining characters must be letters, digits, or underscores.", + "description": "The key for this label. The key must meet the following criteria: Does not exceed 100 characters. Matches the following regular expression: [a-zA-Z][a-zA-Z0-9_]* The first character must be an upper- or lower-case letter. The remaining characters must be letters, digits, or underscores.", "type": "string" }, "valueType": { @@ -2927,7 +4033,7 @@ "type": "object" }, "Linear": { - "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i). Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", + "description": "Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 \u003c= i \u003c N-1): offset + (width * i). Lower bound (1 \u003c= i \u003c N): offset + (width * (i - 1)).", "id": "Linear", "properties": { "numFiniteBuckets": { @@ -3125,6 +4231,24 @@ }, "type": "object" }, + "ListSnoozesResponse": { + "description": "The results of a successful ListSnoozes call, containing the matching Snoozes.", + "id": "ListSnoozesResponse", + "properties": { + "nextPageToken": { + "description": "Page token for repeated calls to ListSnoozes, to fetch additional pages of results. If this is empty or missing, there are no more pages.", + "type": "string" + }, + "snoozes": { + "description": "Snoozes matching this list call.", + "items": { + "$ref": "Snooze" + }, + "type": "array" + } + }, + "type": "object" + }, "ListTimeSeriesResponse": { "description": "The ListTimeSeries response.", "id": "ListTimeSeriesResponse", @@ -3195,8 +4319,26 @@ }, "type": "object" }, + "LogMatch": { + "description": "A condition type that checks whether a log message in the scoping project (https://cloud.google.com/monitoring/api/v3#project_name) satisfies the given filter. Logs from other projects in the metrics scope are not evaluated.", + "id": "LogMatch", + "properties": { + "filter": { + "description": "Required. A logs-based filter. See Advanced Logs Queries (https://cloud.google.com/logging/docs/view/advanced-queries) for how this filter should be constructed.", + "type": "string" + }, + "labelExtractors": { + "additionalProperties": { + "type": "string" + }, + "description": "Optional. A map from a label key to an extractor expression, which is used to extract the value for this label key. Each entry in this map is a specification for how data should be extracted from log entries that match filter. Each combination of extracted values is treated as a separate rule for the purposes of triggering notifications. 
Label keys and corresponding values can be used in notifications generated by this condition.Please see the documentation on logs-based metric valueExtractors (https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics#LogMetric.FIELDS.value_extractor) for syntax and examples.", + "type": "object" + } + }, + "type": "object" + }, "MeshIstio": { - "description": "Istio service scoped to an Istio mesh", + "description": "Istio service scoped to an Istio mesh. Anthos clusters running ASM \u003c 1.6.8 will have their services ingested as this type.", "id": "MeshIstio", "properties": { "meshUid": { @@ -3237,19 +4379,19 @@ "id": "MetricAbsence", "properties": { "aggregations": { - "description": "Specifies the alignment of data points in individual time series as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resrouces). Multiple aggregations are applied in the order specified.This field is similar to the one in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It is advisable to use the ListTimeSeries method when debugging this field.", + "description": "Specifies the alignment of data points in individual time series as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resources). Multiple aggregations are applied in the order specified.This field is similar to the one in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It is advisable to use the ListTimeSeries method when debugging this field.", "items": { "$ref": "Aggregation" }, "type": "array" }, "duration": { - "description": "The amount of time that a time series must fail to report new data to be considered failing. Currently, only values that are a multiple of a minute--e.g. 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. The Duration.nanos field is ignored.", + "description": "The amount of time that a time series must fail to report new data to be considered failing. The minimum value of this field is 120 seconds. Larger values that are a multiple of a minute--for example, 240 or 300 seconds--are supported. If an invalid value is given, an error will be returned. The Duration.nanos field is ignored.", "format": "google-duration", "type": "string" }, "filter": { - "description": "A filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies which time series should be compared with the threshold.The filter is similar to the one that is specified in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that call is useful to verify the time series that will be retrieved / processed) and must specify the metric type and optionally may contain restrictions on resource type, resource labels, and metric labels. This field may not exceed 2048 Unicode characters in length.", + "description": "Required. 
A filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies which time series should be compared with the threshold.The filter is similar to the one that is specified in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that call is useful to verify the time series that will be retrieved / processed). The filter must specify the metric type and the resource type. Optionally, it can specify resource labels and metric labels. This field must not exceed 2048 Unicode characters in length.", "type": "string" }, "trigger": { @@ -3260,7 +4402,7 @@ "type": "object" }, "MetricDescriptor": { - "description": "Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable.The following are specific rules for service defined Monitoring metric descriptors:\ntype, metric_kind, value_type and description fields are all required. The unit field must be specified if the value_type is any of DOUBLE, INT64, DISTRIBUTION.\nMaximum of default 500 metric descriptors per service is allowed.\nMaximum of default 10 labels per metric descriptor is allowed.The default maximum limit can be overridden. Please follow https://cloud.google.com/monitoring/quotas", + "description": "Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable.", "id": "MetricDescriptor", "properties": { "description": { @@ -3272,7 +4414,7 @@ "type": "string" }, "labels": { - "description": "The set of labels that can be used to describe a specific instance of this metric type.The label key name must follow:\nOnly upper and lower-case letters, digits and underscores (_) are allowed.\nLabel name must start with a letter or digit.\nThe maximum length of a label name is 100 characters.For example, the appengine.googleapis.com/http/server/response_latencies metric type has a label for the HTTP response code, response_code, so you can look at latencies for successful responses or just for responses that failed.", + "description": "The set of labels that can be used to describe a specific instance of this metric type. For example, the appengine.googleapis.com/http/server/response_latencies metric type has a label for the HTTP response code, response_code, so you can look at latencies for successful responses or just for responses that failed.", "items": { "$ref": "LabelDescriptor" }, @@ -3295,10 +4437,10 @@ "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. 
Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects allowlisted. Alpha releases don't have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. Beta releases are suitable for limited production use cases.", "GA features are open to all developers and are considered stable and fully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our Terms of Service (https://cloud.google.com/terms/) and the Google Cloud Platform Subject to the Deprecation Policy (https://cloud.google.com/terms/deprecation) documentation." + "Deprecated features are scheduled to be shut down and removed. For more information, see the \"Deprecation Policy\" section of our Terms of Service (https://cloud.google.com/terms/) and the Google Cloud Platform Subject to the Deprecation Policy (https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, @@ -3334,11 +4476,11 @@ "type": "string" }, "type": { - "description": "The metric type, including its DNS name prefix. The type is not URL-encoded.All service defined metrics must be prefixed with the service name, in the format of {service name}/{relative metric name}, such as cloudsql.googleapis.com/database/cpu/utilization. The relative metric name must follow:\nOnly upper and lower-case letters, digits, '/' and underscores '_' are allowed.\nThe maximum number of characters allowed for the relative_metric_name is 100.All user-defined metric types have the DNS name custom.googleapis.com, external.googleapis.com, or logging.googleapis.com/user/.Metric types should use a natural hierarchical grouping. For example:\n\"custom.googleapis.com/invoice/paid/amount\"\n\"external.googleapis.com/prometheus/up\"\n\"appengine.googleapis.com/http/server/response_latencies\"\n", + "description": "The metric type, including its DNS name prefix. The type is not URL-encoded. All user-defined metric types have the DNS name custom.googleapis.com or external.googleapis.com. Metric types should use a natural hierarchical grouping. For example: \"custom.googleapis.com/invoice/paid/amount\" \"external.googleapis.com/prometheus/up\" \"appengine.googleapis.com/http/server/response_latencies\" ", "type": "string" }, "unit": { - "description": "The units in which the metric value is reported. It is only applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. 
The unit defines the representation of the stored metric values.Different systems may scale the values to be more easily displayed (so a value of 0.02KBy might be displayed as 20By, and a value of 3523KBy might be displayed as 3.5MBy). However, if the unit is KBy, then the value of the metric is always in thousands of bytes, no matter how it may be displayed..If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an INT64 CUMULATIVE metric whose unit is s{CPU} (or equivalently 1s{CPU} or just s). If the job uses 12,005 CPU-seconds, then the value is written as 12005.Alternatively, if you want a custom metric to record data in a more granular way, you can create a DOUBLE CUMULATIVE metric whose unit is ks{CPU}, and then write the value 12.005 (which is 12005/1000), or use Kis{CPU} and write 11.723 (which is 12005/1024).The supported units are a subset of The Unified Code for Units of Measure (http://unitsofmeasure.org/ucum.html) standard:Basic units (UNIT)\nbit bit\nBy byte\ns second\nmin minute\nh hour\nd day\n1 dimensionlessPrefixes (PREFIX)\nk kilo (10^3)\nM mega (10^6)\nG giga (10^9)\nT tera (10^12)\nP peta (10^15)\nE exa (10^18)\nZ zetta (10^21)\nY yotta (10^24)\nm milli (10^-3)\nu micro (10^-6)\nn nano (10^-9)\np pico (10^-12)\nf femto (10^-15)\na atto (10^-18)\nz zepto (10^-21)\ny yocto (10^-24)\nKi kibi (2^10)\nMi mebi (2^20)\nGi gibi (2^30)\nTi tebi (2^40)\nPi pebi (2^50)GrammarThe grammar also includes these connectors:\n/ division or ratio (as an infix operator). For examples, kBy/{email} or MiBy/10ms (although you should almost never have /s in a metric unit; rates should always be computed at query time from the underlying cumulative or delta value).\n. multiplication or composition (as an infix operator). For examples, GBy.d or k{watt}.h.The grammar for a unit is as follows:\nExpression = Component { \".\" Component } { \"/\" Component } ;\n\nComponent = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ]\n | Annotation\n | \"1\"\n ;\n\nAnnotation = \"{\" NAME \"}\" ;\nNotes:\nAnnotation is just a comment if it follows a UNIT. If the annotation is used alone, then the unit is equivalent to 1. For examples, {request}/s == 1/s, By{transmitted}/s == By/s.\nNAME is a sequence of non-blank printable ASCII characters not containing { or }.\n1 represents a unitary dimensionless unit (https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in 1/s. It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as 1/d or {new-users}/d (and a metric value 5 would mean \"5 new users). Alternatively, \"thousands of page views per day\" would be represented as 1000/d or k1/d or k{page_views}/d (and a metric value of 5.3 would mean \"5300 page views per day\").\n% represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value 3 means \"3 percent\").\n10^2.% indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value 0.03 means \"3 percent\").", + "description": "The units in which the metric value is reported. It is only applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. 
The unit defines the representation of the stored metric values.Different systems might scale the values to be more easily displayed (so a value of 0.02kBy might be displayed as 20By, and a value of 3523kBy might be displayed as 3.5MBy). However, if the unit is kBy, then the value of the metric is always in thousands of bytes, no matter how it might be displayed.If you want a custom metric to record the exact number of CPU-seconds used by a job, you can create an INT64 CUMULATIVE metric whose unit is s{CPU} (or equivalently 1s{CPU} or just s). If the job uses 12,005 CPU-seconds, then the value is written as 12005.Alternatively, if you want a custom metric to record data in a more granular way, you can create a DOUBLE CUMULATIVE metric whose unit is ks{CPU}, and then write the value 12.005 (which is 12005/1000), or use Kis{CPU} and write 11.723 (which is 12005/1024).The supported units are a subset of The Unified Code for Units of Measure (https://unitsofmeasure.org/ucum.html) standard:Basic units (UNIT) bit bit By byte s second min minute h hour d day 1 dimensionlessPrefixes (PREFIX) k kilo (10^3) M mega (10^6) G giga (10^9) T tera (10^12) P peta (10^15) E exa (10^18) Z zetta (10^21) Y yotta (10^24) m milli (10^-3) u micro (10^-6) n nano (10^-9) p pico (10^-12) f femto (10^-15) a atto (10^-18) z zepto (10^-21) y yocto (10^-24) Ki kibi (2^10) Mi mebi (2^20) Gi gibi (2^30) Ti tebi (2^40) Pi pebi (2^50)GrammarThe grammar also includes these connectors: / division or ratio (as an infix operator). For examples, kBy/{email} or MiBy/10ms (although you should almost never have /s in a metric unit; rates should always be computed at query time from the underlying cumulative or delta value). . multiplication or composition (as an infix operator). For examples, GBy.d or k{watt}.h.The grammar for a unit is as follows: Expression = Component { \".\" Component } { \"/\" Component } ; Component = ( [ PREFIX ] UNIT | \"%\" ) [ Annotation ] | Annotation | \"1\" ; Annotation = \"{\" NAME \"}\" ; Notes: Annotation is just a comment if it follows a UNIT. If the annotation is used alone, then the unit is equivalent to 1. For examples, {request}/s == 1/s, By{transmitted}/s == By/s. NAME is a sequence of non-blank printable ASCII characters not containing { or }. 1 represents a unitary dimensionless unit (https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as in 1/s. It is typically used when none of the basic units are appropriate. For example, \"new users per day\" can be represented as 1/d or {new-users}/d (and a metric value 5 would mean \"5 new users). Alternatively, \"thousands of page views per day\" would be represented as 1000/d or k1/d or k{page_views}/d (and a metric value of 5.3 would mean \"5300 page views per day\"). % represents dimensionless value of 1/100, and annotates values giving a percentage (so the metric values are typically in the range of 0..100, and a metric value 3 means \"3 percent\"). 10^2.% indicates a metric contains a ratio, typically in the range 0..1, that will be multiplied by 100 and displayed as a percentage (so a metric value 0.03 means \"3 percent\").", "type": "string" }, "valueType": { @@ -3392,10 +4534,10 @@ "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). 
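As an aside on the unit grammar above: a minimal Go sketch of creating a custom MetricDescriptor with a UCUM-style unit through the generated client vendored in this patch. The project ID and metric name are placeholders, and error handling is trimmed to the essentials.

    package main

    import (
        "context"
        "log"

        monitoring "google.golang.org/api/monitoring/v3"
    )

    func main() {
        ctx := context.Background()
        svc, err := monitoring.NewService(ctx) // Application Default Credentials
        if err != nil {
            log.Fatal(err)
        }

        // "KiBy" follows the UCUM-subset grammar above: prefix "Ki" (2^10)
        // applied to the basic unit "By" (byte).
        md := &monitoring.MetricDescriptor{
            Type:        "custom.googleapis.com/queue/depth_bytes", // hypothetical metric name
            MetricKind:  "GAUGE",
            ValueType:   "DOUBLE",
            Unit:        "KiBy",
            Description: "Approximate depth of the work queue.",
        }

        // "my-project" is a placeholder project ID.
        if _, err := svc.Projects.MetricDescriptors.Create("projects/my-project", md).Do(); err != nil {
            log.Fatal(err)
        }
    }
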
These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects allowlisted. Alpha releases don't have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. Beta releases are suitable for limited production use cases.", "GA features are open to all developers and are considered stable and fully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our Terms of Service (https://cloud.google.com/terms/) and the Google Cloud Platform Subject to the Deprecation Policy (https://cloud.google.com/terms/deprecation) documentation." + "Deprecated features are scheduled to be shut down and removed. For more information, see the \"Deprecation Policy\" section of our Terms of Service (https://cloud.google.com/terms/) and the Google Cloud Platform Subject to the Deprecation Policy (https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, @@ -3408,7 +4550,7 @@ "type": "object" }, "MetricRange": { - "description": "A MetricRange is used when each window is good when the value x of a single TimeSeries satisfies range.min \u003c= x \u003c range.max. The provided TimeSeries must have ValueType = INT64 or ValueType = DOUBLE and MetricKind = GAUGE.", + "description": "A MetricRange is used when each window is good when the value x of a single TimeSeries satisfies range.min \u003c= x \u003c= range.max. The provided TimeSeries must have ValueType = INT64 or ValueType = DOUBLE and MetricKind = GAUGE.", "id": "MetricRange", "properties": { "range": { @@ -3427,7 +4569,7 @@ "id": "MetricThreshold", "properties": { "aggregations": { - "description": "Specifies the alignment of data points in individual time series as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resrouces). 
Multiple aggregations are applied in the order specified.This field is similar to the one in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It is advisable to use the ListTimeSeries method when debugging this field.", + "description": "Specifies the alignment of data points in individual time series as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resources). Multiple aggregations are applied in the order specified.This field is similar to the one in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It is advisable to use the ListTimeSeries method when debugging this field.", "items": { "$ref": "Aggregation" }, @@ -3471,10 +4613,30 @@ "format": "google-duration", "type": "string" }, + "evaluationMissingData": { + "description": "A condition control that determines how metric-threshold conditions are evaluated when data stops arriving.", + "enum": [ + "EVALUATION_MISSING_DATA_UNSPECIFIED", + "EVALUATION_MISSING_DATA_INACTIVE", + "EVALUATION_MISSING_DATA_ACTIVE", + "EVALUATION_MISSING_DATA_NO_OP" + ], + "enumDescriptions": [ + "An unspecified evaluation missing data option. Equivalent to EVALUATION_MISSING_DATA_NO_OP.", + "If there is no data to evaluate the condition, then evaluate the condition as false.", + "If there is no data to evaluate the condition, then evaluate the condition as true.", + "Do not evaluate the condition to any value if there is no data." + ], + "type": "string" + }, "filter": { - "description": "A filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies which time series should be compared with the threshold.The filter is similar to the one that is specified in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that call is useful to verify the time series that will be retrieved / processed) and must specify the metric type and optionally may contain restrictions on resource type, resource labels, and metric labels. This field may not exceed 2048 Unicode characters in length.", + "description": "Required. A filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies which time series should be compared with the threshold.The filter is similar to the one that is specified in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that call is useful to verify the time series that will be retrieved / processed). The filter must specify the metric type and the resource type. Optionally, it can specify resource labels and metric labels. This field must not exceed 2048 Unicode characters in length.", "type": "string" }, + "forecastOptions": { + "$ref": "ForecastOptions", + "description": "When this field is present, the MetricThreshold condition forecasts whether the time series is predicted to violate the threshold within the forecast_horizon. When this field is not set, the MetricThreshold tests the current value of the timeseries against the threshold." 
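A hedged sketch of how the MetricThreshold fields above fit together (the now-required filter and the new evaluationMissingData control) when creating an alert policy with this client; the project ID, display names, and threshold value are illustrative only.

    package main

    import (
        "context"
        "log"

        monitoring "google.golang.org/api/monitoring/v3"
    )

    func main() {
        ctx := context.Background()
        svc, err := monitoring.NewService(ctx)
        if err != nil {
            log.Fatal(err)
        }

        policy := &monitoring.AlertPolicy{
            DisplayName: "High CPU (sketch)",
            Combiner:    "OR",
            Conditions: []*monitoring.Condition{{
                DisplayName: "CPU above 80% for 5 minutes",
                ConditionThreshold: &monitoring.MetricThreshold{
                    // The filter must name both the metric type and the resource type.
                    Filter:         `metric.type="compute.googleapis.com/instance/cpu/utilization" AND resource.type="gce_instance"`,
                    Comparison:     "COMPARISON_GT",
                    ThresholdValue: 0.8,
                    Duration:       "300s",
                    // Treat gaps in the data as "not firing" rather than
                    // leaving the condition unevaluated.
                    EvaluationMissingData: "EVALUATION_MISSING_DATA_INACTIVE",
                },
            }},
        }

        // "my-project" is a placeholder project ID.
        if _, err := svc.Projects.AlertPolicies.Create("projects/my-project", policy).Do(); err != nil {
            log.Fatal(err)
        }
    }
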
+ }, "thresholdValue": { "description": "A value against which to compare the time series.", "format": "double", @@ -3488,7 +4650,7 @@ "type": "object" }, "MonitoredResource": { - "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"instance_id\" and \"zone\":\n{ \"type\": \"gce_instance\",\n \"labels\": { \"instance_id\": \"12345678901234\",\n \"zone\": \"us-central1-a\" }}\n", + "description": "An object representing a resource that can be used for monitoring, logging, billing, or other purposes. Examples include virtual machine instances, databases, and storage devices such as disks. The type field identifies a MonitoredResourceDescriptor object that describes the resource's schema. Information in the labels field identifies the actual resource and its attributes according to the schema. For example, a particular Compute Engine VM instance could be represented by the following object, because the MonitoredResourceDescriptor for \"gce_instance\" has labels \"project_id\", \"instance_id\" and \"zone\": { \"type\": \"gce_instance\", \"labels\": { \"project_id\": \"my-project\", \"instance_id\": \"12345678901234\", \"zone\": \"us-central1-a\" }} ", "id": "MonitoredResource", "properties": { "labels": { @@ -3499,14 +4661,14 @@ "type": "object" }, "type": { - "description": "Required. The monitored resource type. This field must match the type field of a MonitoredResourceDescriptor object. For example, the type of a Compute Engine VM instance is gce_instance. For a list of types, see Monitoring resource types and Logging resource types.", + "description": "Required. The monitored resource type. This field must match the type field of a MonitoredResourceDescriptor object. For example, the type of a Compute Engine VM instance is gce_instance. For a list of types, see Monitoring resource types (https://cloud.google.com/monitoring/api/resources) and Logging resource types (https://cloud.google.com/logging/docs/api/v2/resource-list).", "type": "string" } }, "type": "object" }, "MonitoredResourceDescriptor": { - "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different services can support different monitored resource types.The following are specific rules to service defined monitored resources for Monitoring and Logging:\nThe type, display_name, description, labels and launch_stage fields are all required.\nThe first label of the monitored resource descriptor must be resource_container. 
There are legacy monitored resource descritptors start with project_id.\nIt must include a location label.\nMaximum of default 5 service defined monitored resource descriptors is allowed per service.\nMaximum of default 10 labels per monitored resource is allowed.The default maximum limit can be overridden. Please follow https://cloud.google.com/monitoring/quotas", + "description": "An object that describes the schema of a MonitoredResource object using a type name and a set of labels. For example, the monitored resource descriptor for Google Compute Engine VM instances has a type of \"gce_instance\" and specifies the use of the labels \"instance_id\" and \"zone\" to identify particular VM instances.Different APIs can support different monitored resource types. APIs generally provide a list method that returns the monitored resource descriptors used by the API.", "id": "MonitoredResourceDescriptor", "properties": { "description": { @@ -3518,7 +4680,7 @@ "type": "string" }, "labels": { - "description": "Required. A set of labels used to describe instances of this monitored resource type. The label key name must follow:\nOnly upper and lower-case letters, digits and underscores (_) are allowed.\nLabel name must start with a letter or digit.\nThe maximum length of a label name is 100 characters.For example, an individual Google Cloud SQL database is identified by values for the labels database_id and location.", + "description": "Required. A set of labels used to describe instances of this monitored resource type. For example, an individual Google Cloud SQL database is identified by values for the labels \"database_id\" and \"zone\".", "items": { "$ref": "LabelDescriptor" }, @@ -3541,10 +4703,10 @@ "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects allowlisted. Alpha releases don't have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. 
Products will be complete from a feature perspective, but may have some open outstanding issues. Beta releases are suitable for limited production use cases.", "GA features are open to all developers and are considered stable and fully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our Terms of Service (https://cloud.google.com/terms/) and the Google Cloud Platform Subject to the Deprecation Policy (https://cloud.google.com/terms/deprecation) documentation." + "Deprecated features are scheduled to be shut down and removed. For more information, see the \"Deprecation Policy\" section of our Terms of Service (https://cloud.google.com/terms/) and the Google Cloud Platform Subject to the Deprecation Policy (https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, @@ -3553,7 +4715,7 @@ "type": "string" }, "type": { - "description": "Required. The monitored resource type. For example, the type cloudsql_database represents databases in Google Cloud SQL.All service defined monitored resource types must be prefixed with the service name, in the format of {service name}/{relative resource name}. The relative resource name must follow:\nOnly upper and lower-case letters and digits are allowed.\nIt must start with upper case character and is recommended to use Upper Camel Case style.\nThe maximum number of characters allowed for the relative_resource_name is 100.Note there are legacy service monitored resources not following this rule.", + "description": "Required. The monitored resource type. For example, the type \"cloudsql_database\" represents databases in Google Cloud SQL. For a list of types, see Monitoring resource types (https://cloud.google.com/monitoring/api/resources) and Logging resource types (https://cloud.google.com/logging/docs/api/v2/resource-list).", "type": "string" } }, @@ -3568,7 +4730,7 @@ "description": "Properties of the object.", "type": "any" }, - "description": "Output only. Values for predefined system metadata labels. System labels are a kind of metadata extracted by Google, including \"machine_image\", \"vpc\", \"subnet_id\", \"security_group\", \"name\", etc. System label values can be only strings, Boolean values, or a list of strings. For example:\n{ \"name\": \"my-test-instance\",\n \"security_group\": [\"a\", \"b\", \"c\"],\n \"spot_instance\": false }\n", + "description": "Output only. Values for predefined system metadata labels. System labels are a kind of metadata extracted by Google, including \"machine_image\", \"vpc\", \"subnet_id\", \"security_group\", \"name\", etc. System label values can be only strings, Boolean values, or a list of strings. For example: { \"name\": \"my-test-instance\", \"security_group\": [\"a\", \"b\", \"c\"], \"spot_instance\": false } ", "type": "object" }, "userLabels": { @@ -3581,6 +4743,42 @@ }, "type": "object" }, + "MonitoringQueryLanguageCondition": { + "description": "A condition type that allows alert policies to be defined using Monitoring Query Language (https://cloud.google.com/monitoring/mql).", + "id": "MonitoringQueryLanguageCondition", + "properties": { + "duration": { + "description": "The amount of time that a time series must violate the threshold to be considered failing. Currently, only values that are a multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. 
When choosing a duration, it is useful to keep in mind the frequency of the underlying time series data (which may also be affected by any alignments specified in the aggregations field); a good duration is long enough so that a single outlier does not generate spurious alerts, but short enough that unhealthy states are detected and alerted on quickly.", + "format": "google-duration", + "type": "string" + }, + "evaluationMissingData": { + "description": "A condition control that determines how metric-threshold conditions are evaluated when data stops arriving.", + "enum": [ + "EVALUATION_MISSING_DATA_UNSPECIFIED", + "EVALUATION_MISSING_DATA_INACTIVE", + "EVALUATION_MISSING_DATA_ACTIVE", + "EVALUATION_MISSING_DATA_NO_OP" + ], + "enumDescriptions": [ + "An unspecified evaluation missing data option. Equivalent to EVALUATION_MISSING_DATA_NO_OP.", + "If there is no data to evaluate the condition, then evaluate the condition as false.", + "If there is no data to evaluate the condition, then evaluate the condition as true.", + "Do not evaluate the condition to any value if there is no data." + ], + "type": "string" + }, + "query": { + "description": "Monitoring Query Language (https://cloud.google.com/monitoring/mql) query that outputs a boolean stream.", + "type": "string" + }, + "trigger": { + "$ref": "Trigger", + "description": "The number/percent of time series for which the comparison must hold in order for the condition to trigger. If unspecified, then the condition will trigger if the comparison is true for any of the time series that have been identified by filter and aggregations, or by the ratio, if denominator_filter and denominator_aggregations are specified." + } + }, + "type": "object" + }, "MutationRecord": { "description": "Describes a change made to a configuration.", "id": "MutationRecord", @@ -3601,6 +4799,10 @@ "description": "A NotificationChannel is a medium through which an alert is delivered when a policy violation is detected. Examples of channels include email, SMS, and third-party messaging applications. Fields containing sensitive information like authentication tokens or contact info are only partially populated on retrieval.", "id": "NotificationChannel", "properties": { + "creationRecord": { + "$ref": "MutationRecord", + "description": "Record of the creation of this channel." + }, "description": { "description": "An optional human-readable description of this notification channel. This description may provide additional details, beyond the display name, for the channel. This may not exceed 1024 Unicode characters.", "type": "string" @@ -3620,8 +4822,15 @@ "description": "Configuration fields that define the channel and its behavior. The permissible and required labels are specified in the NotificationChannelDescriptor.labels of the NotificationChannelDescriptor corresponding to the type field.", "type": "object" }, + "mutationRecords": { + "description": "Records of the modification of this channel.", + "items": { + "$ref": "MutationRecord" + }, + "type": "array" + }, "name": { - "description": "The full REST resource name for this channel. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]\nThe [CHANNEL_ID] is automatically assigned by the server on creation.", + "description": "The full REST resource name for this channel. 
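For the MonitoringQueryLanguageCondition added above, a sketch of building such a condition in Go. The ConditionMonitoringQueryLanguage field name on Condition is assumed from the generated client (the Condition schema is not part of this hunk), and the MQL query is illustrative only.

    package main

    import monitoring "google.golang.org/api/monitoring/v3"

    // mqlCondition returns an alert condition built from the
    // MonitoringQueryLanguageCondition schema above. The query must output a
    // boolean stream, which the MQL "condition" operator produces.
    func mqlCondition() *monitoring.Condition {
        return &monitoring.Condition{
            DisplayName: "CPU above 5% (MQL sketch)",
            ConditionMonitoringQueryLanguage: &monitoring.MonitoringQueryLanguageCondition{
                Query: "fetch gce_instance\n" +
                    "| metric 'compute.googleapis.com/instance/cpu/utilization'\n" +
                    "| condition val() > 0.05",
                Duration:              "120s", // a multiple of a minute, per the description above
                EvaluationMissingData: "EVALUATION_MISSING_DATA_NO_OP",
            },
        }
    }
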
The format is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] The [CHANNEL_ID] is automatically assigned by the server on creation.", "type": "string" }, "type": { @@ -3688,17 +4897,34 @@ "The feature is not yet implemented. Users can not use it.", "Prelaunch features are hidden from users and are only visible internally.", "Early Access features are limited to a closed group of testers. To use these features, you must sign up in advance and sign a Trusted Tester agreement (which includes confidentiality provisions). These features may be unstable, changed in backward-incompatible ways, and are not guaranteed to be released.", - "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects whitelisted. Alpha releases don’t have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", + "Alpha is a limited availability test for releases before they are cleared for widespread use. By Alpha, all significant design issues are resolved and we are in the process of verifying functionality. Alpha customers need to apply for access, agree to applicable terms, and have their projects allowlisted. Alpha releases don't have to be feature complete, no SLAs are provided, and there are no technical support obligations, but they will be far enough along that customers can actually use them in test environments or for limited-use tests -- just like they would in normal production cases.", "Beta is the point at which we are ready to open a release for any customer to use. There are no SLA or technical support obligations in a Beta release. Products will be complete from a feature perspective, but may have some open outstanding issues. Beta releases are suitable for limited production use cases.", "GA features are open to all developers and are considered stable and fully qualified for production use.", - "Deprecated features are scheduled to be shut down and removed. For more information, see the “Deprecation Policy” section of our Terms of Service (https://cloud.google.com/terms/) and the Google Cloud Platform Subject to the Deprecation Policy (https://cloud.google.com/terms/deprecation) documentation." + "Deprecated features are scheduled to be shut down and removed. For more information, see the \"Deprecation Policy\" section of our Terms of Service (https://cloud.google.com/terms/) and the Google Cloud Platform Subject to the Deprecation Policy (https://cloud.google.com/terms/deprecation) documentation." ], "type": "string" }, "name": { - "description": "The full REST resource name for this descriptor. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[TYPE]\nIn the above, [TYPE] is the value of the type field.", + "description": "The full REST resource name for this descriptor. 
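A short sketch of creating a NotificationChannel of the "email" type described above. The permissible labels come from the corresponding NotificationChannelDescriptor; "my-project" and the address are placeholders.

    package main

    import (
        "context"
        "log"

        monitoring "google.golang.org/api/monitoring/v3"
    )

    func main() {
        ctx := context.Background()
        svc, err := monitoring.NewService(ctx)
        if err != nil {
            log.Fatal(err)
        }

        // For the "email" channel type, the descriptor-defined label is
        // "email_address".
        ch := &monitoring.NotificationChannel{
            Type:        "email",
            DisplayName: "On-call inbox (sketch)",
            Labels:      map[string]string{"email_address": "oncall@example.com"},
        }

        created, err := svc.Projects.NotificationChannels.Create("projects/my-project", ch).Do()
        if err != nil {
            log.Fatal(err)
        }
        // The server assigns the [CHANNEL_ID] portion of the returned name.
        log.Println(created.Name) // projects/my-project/notificationChannels/[CHANNEL_ID]
    }
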
The format is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[TYPE] In the above, [TYPE] is the value of the type field.", "type": "string" }, + "supportedTiers": { + "description": "The tiers that support this notification channel; the project service tier must be one of the supported_tiers.", + "items": { + "enum": [ + "SERVICE_TIER_UNSPECIFIED", + "SERVICE_TIER_BASIC", + "SERVICE_TIER_PREMIUM" + ], + "enumDescriptions": [ + "An invalid sentinel value, used to indicate that a tier has not been provided explicitly.", + "The Cloud Monitoring Basic tier, a free tier of service that provides basic features, a moderate allotment of logs, and access to built-in metrics. A number of features are not available in this tier. For more details, see the service tiers documentation (https://cloud.google.com/monitoring/workspaces/tiers).", + "The Cloud Monitoring Premium tier, a higher, more expensive tier of service that provides access to all Cloud Monitoring features, lets you use Cloud Monitoring with AWS accounts, and has a larger allotments for logs and metrics. For more details, see the service tiers documentation (https://cloud.google.com/monitoring/workspaces/tiers)." + ], + "type": "string" + }, + "type": "array" + }, "type": { "description": "The type of notification channel, such as \"email\" and \"sms\". To view the full list of channels, see Channel descriptors (https://cloud.google.com/monitoring/alerts/using-channels-api#ncd). Notification channel types are globally unique.", "type": "string" @@ -3706,6 +4932,53 @@ }, "type": "object" }, + "NotificationRateLimit": { + "description": "Control over the rate of notifications sent to this alert policy's notification channels.", + "id": "NotificationRateLimit", + "properties": { + "period": { + "description": "Not more than one notification per period.", + "format": "google-duration", + "type": "string" + } + }, + "type": "object" + }, + "OperationMetadata": { + "description": "Contains metadata for longrunning operation for the edit Metrics Scope endpoints.", + "id": "OperationMetadata", + "properties": { + "createTime": { + "description": "The time when the batch request was received.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "description": "Current state of the batch operation.", + "enum": [ + "STATE_UNSPECIFIED", + "CREATED", + "RUNNING", + "DONE", + "CANCELLED" + ], + "enumDescriptions": [ + "Invalid.", + "Request has been received.", + "Request is actively being processed.", + "The batch processing is done.", + "The batch processing was cancelled." + ], + "type": "string" + }, + "updateTime": { + "description": "The time when the operation result was last updated.", + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, "Option": { "description": "A protocol buffer option, which can be attached to a message, field, enumeration, etc.", "id": "Option", @@ -3745,6 +5018,18 @@ }, "type": "object" }, + "PingConfig": { + "description": "Information involved in sending ICMP pings alongside public HTTP/TCP checks. For HTTP, the pings are performed for each part of the redirect chain.", + "id": "PingConfig", + "properties": { + "pingsCount": { + "description": "Number of ICMP pings. A maximum of 3 ICMP pings is currently supported.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "Point": { "description": "A single data point in a time series.", "id": "Point", @@ -3792,7 +5077,7 @@ "type": "string" }, "query": { - "description": "Required. 
The query in the monitoring query language format. The default time zone is in UTC.", + "description": "Required. The query in the Monitoring Query Language (https://cloud.google.com/monitoring/mql/reference) format. The default time zone is in UTC.", "type": "string" } }, @@ -3884,6 +5169,40 @@ }, "type": "object" }, + "ResponseStatusCode": { + "description": "A status to accept. Either a status code class like \"2xx\", or an integer status code like \"200\".", + "id": "ResponseStatusCode", + "properties": { + "statusClass": { + "description": "A class of status codes to accept.", + "enum": [ + "STATUS_CLASS_UNSPECIFIED", + "STATUS_CLASS_1XX", + "STATUS_CLASS_2XX", + "STATUS_CLASS_3XX", + "STATUS_CLASS_4XX", + "STATUS_CLASS_5XX", + "STATUS_CLASS_ANY" + ], + "enumDescriptions": [ + "Default value that matches no status codes.", + "The class of status codes between 100 and 199.", + "The class of status codes between 200 and 299.", + "The class of status codes between 300 and 399.", + "The class of status codes between 400 and 499.", + "The class of status codes between 500 and 599.", + "The class of all status codes." + ], + "type": "string" + }, + "statusValue": { + "description": "A status code to accept.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "SendNotificationChannelVerificationCodeRequest": { "description": "The SendNotificationChannelVerificationCode request.", "id": "SendNotificationChannelVerificationCodeRequest", @@ -3898,10 +5217,18 @@ "$ref": "AppEngine", "description": "Type used for App Engine services." }, + "basicService": { + "$ref": "BasicService", + "description": "Message that contains the service type and service labels of this service if it is a basic service. Documentation and examples here (https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli)." + }, "cloudEndpoints": { "$ref": "CloudEndpoints", "description": "Type used for Cloud Endpoints services." }, + "cloudRun": { + "$ref": "CloudRun", + "description": "Type used for Cloud Run services." + }, "clusterIstio": { "$ref": "ClusterIstio", "description": "Type used for Istio services that live in a Kubernetes cluster." @@ -3914,23 +5241,46 @@ "description": "Name used for UI elements listing this Service.", "type": "string" }, + "gkeNamespace": { + "$ref": "GkeNamespace", + "description": "Type used for GKE Namespaces." + }, + "gkeService": { + "$ref": "GkeService", + "description": "Type used for GKE Services (the Kubernetes concept of a service)." + }, + "gkeWorkload": { + "$ref": "GkeWorkload", + "description": "Type used for GKE Workloads." + }, + "istioCanonicalService": { + "$ref": "IstioCanonicalService", + "description": "Type used for canonical services scoped to an Istio mesh. Metrics for Istio are documented here (https://istio.io/latest/docs/reference/config/metrics/)" + }, "meshIstio": { "$ref": "MeshIstio", "description": "Type used for Istio services scoped to an Istio mesh." }, "name": { - "description": "Resource name for this Service. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]\n", + "description": "Resource name for this Service. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] ", "type": "string" }, "telemetry": { "$ref": "Telemetry", "description": "Configuration for how to query telemetry on a Service." + }, + "userLabels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels which have been used to annotate the service. 
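The QueryTimeSeriesRequest.query field above takes a Monitoring Query Language string; a sketch of issuing such a query through the generated client follows, with a placeholder project and an illustrative query.

    package main

    import (
        "context"
        "fmt"
        "log"

        monitoring "google.golang.org/api/monitoring/v3"
    )

    func main() {
        ctx := context.Background()
        svc, err := monitoring.NewService(ctx)
        if err != nil {
            log.Fatal(err)
        }

        req := &monitoring.QueryTimeSeriesRequest{
            // Timestamps in the query are interpreted in UTC by default.
            Query: "fetch gce_instance\n" +
                "| metric 'compute.googleapis.com/instance/cpu/utilization'\n" +
                "| within 10m",
        }

        resp, err := svc.Projects.TimeSeries.Query("projects/my-project", req).Do()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(len(resp.TimeSeriesData), "series returned")
    }
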
Label keys must start with a letter. Label keys and values may contain lowercase letters, numbers, underscores, and dashes. Label keys and values have a maximum length of 63 characters, and must be less than 128 bytes in size. Up to 64 label entries may be stored. For labels which do not have a semantic value, the empty string may be supplied for the label value.", + "type": "object" } }, "type": "object" }, "ServiceLevelIndicator": { - "description": "A Service-Level Indicator (SLI) describes the \"performance\" of a service. For some services, the SLI is well-defined. In such cases, the SLI can be described easily by referencing the well-known SLI and providing the needed parameters. Alternatively, a \"custom\" SLI can be defined with a query to the underlying metric store. An SLI is defined to be good_service /\ntotal_service over any queried time interval. The value of performance always falls into the range 0 \u003c= performance \u003c= 1. A custom SLI describes how to compute this ratio, whether this is by dividing values from a pair of time series, cutting a Distribution into good and bad counts, or counting time windows in which the service complies with a criterion. For separation of concerns, a single Service-Level Indicator measures performance for only one aspect of service quality, such as fraction of successful queries or fast-enough queries.", + "description": "A Service-Level Indicator (SLI) describes the \"performance\" of a service. For some services, the SLI is well-defined. In such cases, the SLI can be described easily by referencing the well-known SLI and providing the needed parameters. Alternatively, a \"custom\" SLI can be defined with a query to the underlying metric store. An SLI is defined to be good_service / total_service over any queried time interval. The value of performance always falls into the range 0 \u003c= performance \u003c= 1. A custom SLI describes how to compute this ratio, whether this is by dividing values from a pair of time series, cutting a Distribution into good and bad counts, or counting time windows in which the service complies with a criterion. For separation of concerns, a single Service-Level Indicator measures performance for only one aspect of service quality, such as fraction of successful queries or fast-enough queries.", "id": "ServiceLevelIndicator", "properties": { "basicSli": { @@ -3953,7 +5303,7 @@ "id": "ServiceLevelObjective", "properties": { "calendarPeriod": { - "description": "A calendar period, semantically \"since the start of the current \u003ccalendar_period\u003e\". At this time, only DAY, WEEK, FORTNIGHT, and MONTH are supported.", + "description": "A calendar period, semantically \"since the start of the current \". At this time, only DAY, WEEK, FORTNIGHT, and MONTH are supported.", "enum": [ "CALENDAR_PERIOD_UNSPECIFIED", "DAY", @@ -3986,17 +5336,47 @@ "type": "number" }, "name": { - "description": "Resource name for this ServiceLevelObjective. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]\n", + "description": "Resource name for this ServiceLevelObjective. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] ", "type": "string" }, "rollingPeriod": { - "description": "A rolling time period, semantically \"in the past \u003crolling_period\u003e\". Must be an integer multiple of 1 day no larger than 30 days.", + "description": "A rolling time period, semantically \"in the past \". 
Must be an integer multiple of 1 day no larger than 30 days.", "format": "google-duration", "type": "string" }, "serviceLevelIndicator": { "$ref": "ServiceLevelIndicator", "description": "The definition of good service, used to measure and calculate the quality of the Service's performance with respect to a single aspect of service quality." + }, + "userLabels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels which have been used to annotate the service-level objective. Label keys must start with a letter. Label keys and values may contain lowercase letters, numbers, underscores, and dashes. Label keys and values have a maximum length of 63 characters, and must be less than 128 bytes in size. Up to 64 label entries may be stored. For labels which do not have a semantic value, the empty string may be supplied for the label value.", + "type": "object" + } + }, + "type": "object" + }, + "Snooze": { + "description": "A Snooze will prevent any alerts from being opened, and close any that are already open. The Snooze will work on alerts that match the criteria defined in the Snooze. The Snooze will be active from interval.start_time through interval.end_time.", + "id": "Snooze", + "properties": { + "criteria": { + "$ref": "Criteria", + "description": "Required. This defines the criteria for applying the Snooze. See Criteria for more information." + }, + "displayName": { + "description": "Required. A display name for the Snooze. This can be, at most, 512 unicode characters.", + "type": "string" + }, + "interval": { + "$ref": "TimeInterval", + "description": "Required. The Snooze will be active from interval.start_time through interval.end_time. interval.start_time cannot be in the past. There is a 15 second clock skew to account for the time it takes for a request to reach the API from the UI." + }, + "name": { + "description": "Required. The name of the Snooze. The format is: projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] The ID of the Snooze will be generated by the system.", + "type": "string" } }, "type": "object" @@ -4013,11 +5393,11 @@ "type": "object" }, "SpanContext": { - "description": "The context of a span, attached to Exemplars in Distribution values during aggregation.It contains the name of a span with format:\nprojects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID]\n", + "description": "The context of a span. This is attached to an Exemplar in Distribution values during aggregation.It contains the name of a span with format: projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] ", "id": "SpanContext", "properties": { "spanName": { - "description": "The resource name of the span. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID]\n[TRACE_ID] is a unique identifier for a trace within a project; it is a 32-character hexadecimal encoding of a 16-byte array.[SPAN_ID] is a unique identifier for a span within a trace; it is a 16-character hexadecimal encoding of an 8-byte array.", + "description": "The resource name of the span. 
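A sketch of creating the Snooze resource defined above. The Criteria schema is only referenced here, so its Policies field is an assumption about the generated client; the policy ID and times are placeholders.

    package main

    import (
        "context"
        "log"
        "time"

        monitoring "google.golang.org/api/monitoring/v3"
    )

    func main() {
        ctx := context.Background()
        svc, err := monitoring.NewService(ctx)
        if err != nil {
            log.Fatal(err)
        }

        now := time.Now()
        snooze := &monitoring.Snooze{
            DisplayName: "Planned maintenance (sketch)",
            Criteria: &monitoring.Criteria{
                // Placeholder alert policy to be silenced.
                Policies: []string{"projects/my-project/alertPolicies/1234567890"},
            },
            Interval: &monitoring.TimeInterval{
                // Start slightly in the future: interval.start_time cannot be
                // in the past (modulo the documented 15-second clock skew).
                StartTime: now.Add(time.Minute).Format(time.RFC3339),
                EndTime:   now.Add(2 * time.Hour).Format(time.RFC3339),
            },
        }

        if _, err := svc.Projects.Snoozes.Create("projects/my-project", snooze).Do(); err != nil {
            log.Fatal(err)
        }
    }
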
The format is: projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] [TRACE_ID] is a unique identifier for a trace within a project; it is a 32-character hexadecimal encoding of a 16-byte array.[SPAN_ID] is a unique identifier for a span within a trace; it is a 16-character hexadecimal encoding of an 8-byte array.", "type": "string" } }, @@ -4054,6 +5434,10 @@ "description": "Information required for a TCP Uptime check request.", "id": "TcpCheck", "properties": { + "pingConfig": { + "$ref": "PingConfig", + "description": "Contains information needed to add pings to a TCP check." + }, "port": { "description": "The TCP port on the server against which to run the check. Will be combined with host (specified within the monitored_resource) to construct the full URL. Required.", "format": "int32", @@ -4074,7 +5458,7 @@ "type": "object" }, "TimeInterval": { - "description": "A closed time interval. It extends from the start time to the end time, and includes both: [startTime, endTime]. Valid time intervals depend on the MetricKind of the metric value. In no case can the end time be earlier than the start time.\nFor GAUGE metrics, the startTime value is technically optional; if no value is specified, the start time defaults to the value of the end time, and the interval represents a single point in time. If both start and end times are specified, they must be identical. Such an interval is valid only for GAUGE metrics, which are point-in-time measurements. The end time of a new interval must be at least a millisecond after the end time of the previous interval.\nFor DELTA metrics, the start time and end time must specify a non-zero interval, with subsequent points specifying contiguous and non-overlapping intervals. For DELTA metrics, the start time of the next interval must be at least a millisecond after the end time of the previous interval.\nFor CUMULATIVE metrics, the start time and end time must specify a a non-zero interval, with subsequent points specifying the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points. The new start time must be at least a millisecond after the end time of the previous interval.\nThe start time of a new interval must be at least a millisecond after the end time of the previous interval because intervals are closed. If the start time of a new interval is the same as the end time of the previous interval, then data written at the new start time could overwrite data written at the previous end time.", + "description": "Describes a time interval: Reads: A half-open time interval. It includes the end time but excludes the start time: (startTime, endTime]. The start time must be specified, must be earlier than the end time, and should be no older than the data retention period for the metric. Writes: A closed time interval. It extends from the start time to the end time, and includes both: [startTime, endTime]. Valid time intervals depend on the MetricKind (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind) of the metric value. The end time must not be earlier than the start time, and the end time must not be more than 25 hours in the past or more than five minutes in the future. For GAUGE metrics, the startTime value is technically optional; if no value is specified, the start time defaults to the value of the end time, and the interval represents a single point in time. 
If both start and end times are specified, they must be identical. Such an interval is valid only for GAUGE metrics, which are point-in-time measurements. The end time of a new interval must be at least a millisecond after the end time of the previous interval. For DELTA metrics, the start time and end time must specify a non-zero interval, with subsequent points specifying contiguous and non-overlapping intervals. For DELTA metrics, the start time of the next interval must be at least a millisecond after the end time of the previous interval. For CUMULATIVE metrics, the start time and end time must specify a non-zero interval, with subsequent points specifying the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points. The new start time must be at least a millisecond after the end time of the previous interval. The start time of a new interval must be at least a millisecond after the end time of the previous interval because intervals are closed. If the start time of a new interval is the same as the end time of the previous interval, then data written at the new start time could overwrite data written at the previous end time.", "id": "TimeInterval", "properties": { "endTime": { @@ -4127,7 +5511,7 @@ }, "resource": { "$ref": "MonitoredResource", - "description": "The associated monitored resource. Custom metrics can use only certain monitored resource types in their time series data." + "description": "The associated monitored resource. Custom metrics can use only certain monitored resource types in their time series data. For more information, see Monitored resources for custom metrics (https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom-metric-resources)." }, "unit": { "description": "The units in which the metric value is reported. It is only applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. The unit defines the representation of the stored metric values.", @@ -4201,7 +5585,7 @@ "type": "object" }, "TimeSeriesRatio": { - "description": "A TimeSeriesRatio specifies two TimeSeries to use for computing the good_service / total_service ratio. The specified TimeSeries must have ValueType = DOUBLE or ValueType = INT64 and must have MetricKind =\nDELTA or MetricKind = CUMULATIVE. The TimeSeriesRatio must specify exactly two of good, bad, and total, and the relationship good_service +\nbad_service = total_service will be assumed.", + "description": "A TimeSeriesRatio specifies two TimeSeries to use for computing the good_service / total_service ratio. The specified TimeSeries must have ValueType = DOUBLE or ValueType = INT64 and must have MetricKind = DELTA or MetricKind = CUMULATIVE. The TimeSeriesRatio must specify exactly two of good, bad, and total, and the relationship good_service + bad_service = total_service will be assumed.", "id": "TimeSeriesRatio", "properties": { "badServiceFilter": { @@ -4209,11 +5593,11 @@ "type": "string" }, "goodServiceFilter": { - "description": "A monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) specifying a TimeSeries quantifying good service provided. Must have ValueType = DOUBLE or ValueType = INT64 and must have MetricKind =\nDELTA or MetricKind = CUMULATIVE.", + "description": "A monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) specifying a TimeSeries quantifying good service provided. 
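To make the GAUGE interval rules above concrete, a sketch of writing a single GAUGE point where the start time is omitted so the interval collapses to a single instant at the end time; the metric name and project are placeholders.

    package main

    import (
        "context"
        "log"
        "time"

        monitoring "google.golang.org/api/monitoring/v3"
    )

    func main() {
        ctx := context.Background()
        svc, err := monitoring.NewService(ctx)
        if err != nil {
            log.Fatal(err)
        }

        // GAUGE points are point-in-time: leaving StartTime empty lets it
        // default to EndTime, per the TimeInterval rules above.
        now := time.Now().Format(time.RFC3339)
        v := 42.0
        ts := &monitoring.TimeSeries{
            Metric: &monitoring.Metric{
                Type: "custom.googleapis.com/queue/depth_bytes", // hypothetical custom metric
            },
            Resource: &monitoring.MonitoredResource{
                Type:   "global",
                Labels: map[string]string{"project_id": "my-project"},
            },
            Points: []*monitoring.Point{{
                Interval: &monitoring.TimeInterval{EndTime: now},
                Value:    &monitoring.TypedValue{DoubleValue: &v},
            }},
        }

        req := &monitoring.CreateTimeSeriesRequest{TimeSeries: []*monitoring.TimeSeries{ts}}
        if _, err := svc.Projects.TimeSeries.Create("projects/my-project", req).Do(); err != nil {
            log.Fatal(err)
        }
    }
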
Must have ValueType = DOUBLE or ValueType = INT64 and must have MetricKind = DELTA or MetricKind = CUMULATIVE.", "type": "string" }, "totalServiceFilter": { - "description": "A monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) specifying a TimeSeries quantifying total demanded service. Must have ValueType = DOUBLE or ValueType = INT64 and must have MetricKind =\nDELTA or MetricKind = CUMULATIVE.", + "description": "A monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) specifying a TimeSeries quantifying total demanded service. Must have ValueType = DOUBLE or ValueType = INT64 and must have MetricKind = DELTA or MetricKind = CUMULATIVE.", "type": "string" } }, @@ -4297,12 +5681,12 @@ "description": "A distribution value." }, "doubleValue": { - "description": "A 64-bit double-precision floating-point number. Its magnitude is approximately \u0026plusmn;10\u003csup\u003e\u0026plusmn;300\u003c/sup\u003e and it has 16 significant digits of precision.", + "description": "A 64-bit double-precision floating-point number. Its magnitude is approximately ±10±300 and it has 16 significant digits of precision.", "format": "double", "type": "number" }, "int64Value": { - "description": "A 64-bit integer. Its range is approximately \u0026plusmn;9.2x10\u003csup\u003e18\u003c/sup\u003e.", + "description": "A 64-bit integer. Its range is approximately ±9.2x1018.", "format": "int64", "type": "string" }, @@ -4317,6 +5701,20 @@ "description": "This message configures which resources and services to monitor for availability.", "id": "UptimeCheckConfig", "properties": { + "checkerType": { + "description": "The type of checkers to use to execute the Uptime check.", + "enum": [ + "CHECKER_TYPE_UNSPECIFIED", + "STATIC_IP_CHECKERS", + "VPC_CHECKERS" + ], + "enumDescriptions": [ + "The default checker type. Currently converted to STATIC_IP_CHECKERS on creation, the default conversion behavior may change in the future.", + "STATIC_IP_CHECKERS are used for uptime checks that perform egress across the public internet. STATIC_IP_CHECKERS use the static IP addresses returned by ListUptimeCheckIps.", + "VPC_CHECKERS are used for uptime checks that perform egress using Service Directory and private network access. When using VPC_CHECKERS, the monitored resource type must be servicedirectory_service." + ], + "type": "string" + }, "contentMatchers": { "description": "The content that is expected to appear in the data returned by the target server against which the check is run. Currently, only the first entry in the content_matchers list is supported, and additional entries will be ignored. This field is optional and should only be specified if a content match is required as part of the/ Uptime check.", "items": { @@ -4325,7 +5723,7 @@ "type": "array" }, "displayName": { - "description": "A human-friendly name for the Uptime check configuration. The display name should be unique within a Stackdriver Workspace in order to make it easier to identify; however, uniqueness is not enforced. Required.", + "description": "A human-friendly name for the Uptime check configuration. The display name should be unique within a Cloud Monitoring Workspace in order to make it easier to identify; however, uniqueness is not enforced. Required.", "type": "string" }, "httpCheck": { @@ -4345,10 +5743,10 @@ }, "monitoredResource": { "$ref": "MonitoredResource", - "description": "The monitored resource (https://cloud.google.com/monitoring/api/resources) associated with the configuration. 
The following monitored resource types are supported for Uptime checks: uptime_url, gce_instance, gae_app, aws_ec2_instance, aws_elb_load_balancer" + "description": "The monitored resource (https://cloud.google.com/monitoring/api/resources) associated with the configuration. The following monitored resource types are valid for this field: uptime_url, gce_instance, gae_app, aws_ec2_instance, aws_elb_load_balancer, k8s_service, servicedirectory_service, cloud_run_revision" }, "name": { - "description": "A unique resource name for this Uptime check configuration. The format is:\n projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]\nThis field should be omitted when creating the Uptime check configuration; on create, the resource name is assigned by the server and included in the response.", + "description": "A unique resource name for this Uptime check configuration. The format is: projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] [PROJECT_ID_OR_NUMBER] is the Workspace host project associated with the Uptime check. This field should be omitted when creating the Uptime check configuration; on create, the resource name is assigned by the server and included in the response.", "type": "string" }, "period": { @@ -4362,20 +5760,26 @@ }, "selectedRegions": { "description": "The list of regions from which the check will be run. Some regions contain one location, and others contain more than one. If this field is specified, enough regions must be provided to include a minimum of 3 locations. Not specifying this field will result in Uptime checks running from all available regions.", - "enumDescriptions": [ - "Default value if no region is specified. Will result in Uptime checks running from all regions.", - "Allows checks to run from locations within the United States of America.", - "Allows checks to run from locations within the continent of Europe.", - "Allows checks to run from locations within the continent of South America.", - "Allows checks to run from locations within the Asia Pacific area (ex: Singapore)." - ], "items": { "enum": [ "REGION_UNSPECIFIED", "USA", "EUROPE", "SOUTH_AMERICA", - "ASIA_PACIFIC" + "ASIA_PACIFIC", + "USA_OREGON", + "USA_IOWA", + "USA_VIRGINIA" ], + "enumDescriptions": [ + "Default value if no region is specified. Will result in Uptime checks running from all regions.", + "Allows checks to run from locations within the United States of America.", + "Allows checks to run from locations within the continent of Europe.", + "Allows checks to run from locations within the continent of South America.", + "Allows checks to run from locations within the Asia Pacific area (ex: Singapore).", + "Allows checks to run from locations within the western United States of America.", + "Allows checks to run from locations within the central United States of America.", + "Allows checks to run from locations within the eastern United States of America." ], "type": "string" }, @@ -4389,6 +5793,13 @@ "description": "The maximum amount of time to wait for the request to complete (must be between 1 and 60 seconds). Required.", "format": "google-duration", "type": "string" + }, + "userLabels": { + "additionalProperties": { + "type": "string" + }, + "description": "User-supplied key/value data to be used for organizing and identifying the UptimeCheckConfig objects. The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. 
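To make the UptimeCheckConfig fields added here concrete, a hedged sketch of creating a check with the generated client; the display name, host, labels, and project are invented for illustration, and svc is a *monitoring.Service as in the earlier sketch.

	cfg := &monitoring.UptimeCheckConfig{
		DisplayName: "homepage-availability", // hypothetical
		MonitoredResource: &monitoring.MonitoredResource{
			Type:   "uptime_url",
			Labels: map[string]string{"host": "www.example.com"},
		},
		HttpCheck:       &monitoring.HttpCheck{Path: "/", Port: 443, UseSsl: true},
		Period:          "300s",
		Timeout:         "10s",
		CheckerType:     "STATIC_IP_CHECKERS",
		SelectedRegions: []string{"USA", "EUROPE", "SOUTH_AMERICA"}, // enough regions for 3+ locations
		UserLabels:      map[string]string{"team": "sre"},           // lowercase; keys begin with a letter
	}
	created, err := svc.Projects.UptimeCheckConfigs.Create("projects/my-project", cfg).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created %s", created.Name) // the server assigns the resource name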
Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.", + "type": "object" } }, "type": "object" @@ -4412,14 +5823,20 @@ "USA", "EUROPE", "SOUTH_AMERICA", - "ASIA_PACIFIC" + "ASIA_PACIFIC", + "USA_OREGON", + "USA_IOWA", + "USA_VIRGINIA" ], "enumDescriptions": [ "Default value if no region is specified. Will result in Uptime checks running from all regions.", "Allows checks to run from locations within the United States of America.", "Allows checks to run from locations within the continent of Europe.", "Allows checks to run from locations within the continent of South America.", - "Allows checks to run from locations within the Asia Pacific area (ex: Singapore)." + "Allows checks to run from locations within the Asia Pacific area (ex: Singapore).", + "Allows checks to run from locations within the western United States of America", + "Allows checks to run from locations within the central United States of America", + "Allows checks to run from locations within the eastern United States of America" ], "type": "string" } diff --git a/prometheus-to-sd/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go b/prometheus-to-sd/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go index d604c51e4..6f700c345 100644 --- a/prometheus-to-sd/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go +++ b/prometheus-to-sd/vendor/google.golang.org/api/monitoring/v3/monitoring-gen.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC. +// Copyright 2023 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -10,35 +10,35 @@ // // For product documentation, see: https://cloud.google.com/monitoring/api/ // -// Creating a client +// # Creating a client // // Usage example: // -// import "google.golang.org/api/monitoring/v3" -// ... -// ctx := context.Background() -// monitoringService, err := monitoring.NewService(ctx) +// import "google.golang.org/api/monitoring/v3" +// ... +// ctx := context.Background() +// monitoringService, err := monitoring.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // -// Other authentication options +// # Other authentication options // // By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: // -// monitoringService, err := monitoring.NewService(ctx, option.WithScopes(monitoring.MonitoringWriteScope)) +// monitoringService, err := monitoring.NewService(ctx, option.WithScopes(monitoring.MonitoringWriteScope)) // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // -// monitoringService, err := monitoring.NewService(ctx, option.WithAPIKey("AIza...")) +// monitoringService, err := monitoring.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // -// config := &oauth2.Config{...} -// // ... -// token, err := config.Exchange(ctx, ...) -// monitoringService, err := monitoring.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) 
+// monitoringService, err := monitoring.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package monitoring // import "google.golang.org/api/monitoring/v3" @@ -56,6 +56,7 @@ import ( "strings" googleapi "google.golang.org/api/googleapi" + internal "google.golang.org/api/internal" gensupport "google.golang.org/api/internal/gensupport" option "google.golang.org/api/option" internaloption "google.golang.org/api/option/internaloption" @@ -85,7 +86,8 @@ const mtlsBasePath = "https://monitoring.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( - // View and manage your data across Google Cloud Platform services + // See, edit, configure, and delete your Google Cloud data and see the + // email address for your Google Account. CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" // View and write monitoring data for all of your Google and third-party @@ -102,7 +104,7 @@ const ( // NewService creates a new Service. func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { - scopesOption := option.WithScopes( + scopesOption := internaloption.WithDefaultScopes( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/monitoring", "https://www.googleapis.com/auth/monitoring.read", @@ -136,6 +138,8 @@ func New(client *http.Client) (*Service, error) { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} + s.Folders = NewFoldersService(s) + s.Organizations = NewOrganizationsService(s) s.Projects = NewProjectsService(s) s.Services = NewServicesService(s) s.UptimeCheckIps = NewUptimeCheckIpsService(s) @@ -147,6 +151,10 @@ type Service struct { BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment + Folders *FoldersService + + Organizations *OrganizationsService + Projects *ProjectsService Services *ServicesService @@ -161,6 +169,48 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func NewFoldersService(s *Service) *FoldersService { + rs := &FoldersService{s: s} + rs.TimeSeries = NewFoldersTimeSeriesService(s) + return rs +} + +type FoldersService struct { + s *Service + + TimeSeries *FoldersTimeSeriesService +} + +func NewFoldersTimeSeriesService(s *Service) *FoldersTimeSeriesService { + rs := &FoldersTimeSeriesService{s: s} + return rs +} + +type FoldersTimeSeriesService struct { + s *Service +} + +func NewOrganizationsService(s *Service) *OrganizationsService { + rs := &OrganizationsService{s: s} + rs.TimeSeries = NewOrganizationsTimeSeriesService(s) + return rs +} + +type OrganizationsService struct { + s *Service + + TimeSeries *OrganizationsTimeSeriesService +} + +func NewOrganizationsTimeSeriesService(s *Service) *OrganizationsTimeSeriesService { + rs := &OrganizationsTimeSeriesService{s: s} + return rs +} + +type OrganizationsTimeSeriesService struct { + s *Service +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.AlertPolicies = NewProjectsAlertPoliciesService(s) @@ -170,6 +220,7 @@ func NewProjectsService(s *Service) *ProjectsService { rs.MonitoredResourceDescriptors = NewProjectsMonitoredResourceDescriptorsService(s) rs.NotificationChannelDescriptors = NewProjectsNotificationChannelDescriptorsService(s) rs.NotificationChannels = NewProjectsNotificationChannelsService(s) + rs.Snoozes = NewProjectsSnoozesService(s) rs.TimeSeries = 
NewProjectsTimeSeriesService(s) rs.UptimeCheckConfigs = NewProjectsUptimeCheckConfigsService(s) return rs @@ -192,6 +243,8 @@ type ProjectsService struct { NotificationChannels *ProjectsNotificationChannelsService + Snoozes *ProjectsSnoozesService + TimeSeries *ProjectsTimeSeriesService UptimeCheckConfigs *ProjectsUptimeCheckConfigsService @@ -272,6 +325,15 @@ type ProjectsNotificationChannelsService struct { s *Service } +func NewProjectsSnoozesService(s *Service) *ProjectsSnoozesService { + rs := &ProjectsSnoozesService{s: s} + return rs +} + +type ProjectsSnoozesService struct { + s *Service +} + func NewProjectsTimeSeriesService(s *Service) *ProjectsTimeSeriesService { rs := &ProjectsTimeSeriesService{s: s} return rs @@ -349,11 +411,12 @@ type Aggregation struct { // seconds, that is used to divide the data in all the time series into // consistent blocks of time. This will be done before the per-series // aligner can be applied to the data.The value must be at least 60 - // seconds, at most 104 weeks. If a per-series aligner other than - // ALIGN_NONE is specified, this field is required or an error is - // returned. If no per-series aligner is specified, or the aligner - // ALIGN_NONE is specified, then this field is ignored.The maximum value - // of the alignment_period is 2 years, or 104 weeks. + // seconds. If a per-series aligner other than ALIGN_NONE is specified, + // this field is required or an error is returned. If no per-series + // aligner is specified, or the aligner ALIGN_NONE is specified, then + // this field is ignored.The maximum value of the alignment_period is + // 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for + // alerting policies. AlignmentPeriod string `json:"alignmentPeriod,omitempty"` // CrossSeriesReducer: The reduction operation to be used to combine @@ -569,10 +632,10 @@ type Aggregation struct { // ForceSendFields is a list of field names (e.g. "AlignmentPeriod") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AlignmentPeriod") to @@ -597,6 +660,10 @@ func (s *Aggregation) MarshalJSON() ([]byte, error) { // policies, see Introduction to Alerting // (https://cloud.google.com/monitoring/alerts/). type AlertPolicy struct { + // AlertStrategy: Control over how this alert policy's notification + // channels are notified. + AlertStrategy *AlertStrategy `json:"alertStrategy,omitempty"` + // Combiner: How to combine the results of multiple conditions to // determine if an incident should be opened. If // condition_time_series_query_language is present, this must be @@ -657,14 +724,11 @@ type AlertPolicy struct { MutationRecord *MutationRecord `json:"mutationRecord,omitempty"` // Name: Required if the policy exists. The resource name for this - // policy. The format - // is: + // policy. 
The format is: // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] - // [A - // LERT_POLICY_ID] is assigned by Stackdriver Monitoring when the policy - // is created. When calling the alertPolicies.create method, do not - // include the name field in the alerting policy passed as part of the - // request. + // [ALERT_POLICY_ID] is assigned by Cloud Monitoring when the policy is + // created. When calling the alertPolicies.create method, do not include + // the name field in the alerting policy passed as part of the request. Name string `json:"name,omitempty"` // NotificationChannels: Identifies the notification channels to which @@ -673,10 +737,8 @@ type AlertPolicy struct { // of this array corresponds to the name field in each of the // NotificationChannel objects that are returned from the // ListNotificationChannels method. The format of the entries in this - // field - // is: + // field is: // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] - NotificationChannels []string `json:"notificationChannels,omitempty"` // UserLabels: User-supplied key/value data to be used for organizing @@ -696,15 +758,50 @@ type AlertPolicy struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Combiner") to + // ForceSendFields is a list of field names (e.g. "AlertStrategy") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AlertStrategy") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AlertPolicy) MarshalJSON() ([]byte, error) { + type NoMethod AlertPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AlertStrategy: Control over how the notification channels in +// notification_channels are notified when this alert fires. +type AlertStrategy struct { + // AutoClose: If an alert policy that was active has no data for this + // long, any open incidents will close. + AutoClose string `json:"autoClose,omitempty"` + + // NotificationRateLimit: Required for alert policies with a LogMatch + // condition. This limit is not implemented for alert policies that are + // not log-based. + NotificationRateLimit *NotificationRateLimit `json:"notificationRateLimit,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoClose") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Combiner") to include in + // NullFields is a list of field names (e.g. "AutoClose") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -713,8 +810,8 @@ type AlertPolicy struct { NullFields []string `json:"-"` } -func (s *AlertPolicy) MarshalJSON() ([]byte, error) { - type NoMethod AlertPolicy +func (s *AlertStrategy) MarshalJSON() ([]byte, error) { + type NoMethod AlertStrategy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } @@ -724,16 +821,16 @@ func (s *AlertPolicy) MarshalJSON() ([]byte, error) { type AppEngine struct { // ModuleId: The ID of the App Engine module underlying this service. // Corresponds to the module_id resource label in the gae_app monitored - // resource: - // https://cloud.google.com/monitoring/api/resources#tag_gae_app + // resource + // (https://cloud.google.com/monitoring/api/resources#tag_gae_app). ModuleId string `json:"moduleId,omitempty"` // ForceSendFields is a list of field names (e.g. "ModuleId") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ModuleId") to include in @@ -770,10 +867,10 @@ type BasicAuthentication struct { // ForceSendFields is a list of field names (e.g. "Password") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Password") to include in @@ -791,6 +888,44 @@ func (s *BasicAuthentication) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BasicService: A well-known service type, defined by its service type +// and service labels. Documentation and examples here +// (https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). 
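The AlertStrategy message above attaches to AlertPolicy; a speculative sketch of creating a policy with auto-close behavior (display name, filter, and durations are invented; svc is a *monitoring.Service):

	policy := &monitoring.AlertPolicy{
		DisplayName: "High error rate", // hypothetical
		Combiner:    "OR",
		Conditions: []*monitoring.Condition{{
			DisplayName: "Error ratio above 5%",
			ConditionThreshold: &monitoring.MetricThreshold{
				Filter:         `metric.type="custom.googleapis.com/error_ratio"`, // hypothetical
				Comparison:     "COMPARISON_GT",
				ThresholdValue: 0.05,
				Duration:       "300s",
			},
		}},
		AlertStrategy: &monitoring.AlertStrategy{
			AutoClose: "1800s", // close open incidents after 30 minutes without data
		},
	}
	created, err := svc.Projects.AlertPolicies.Create("projects/my-project", policy).Do()

Omitting the name field on create, as the comments above require, lets Cloud Monitoring assign the policy and condition identifiers.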
+type BasicService struct { + // ServiceLabels: Labels that specify the resource that emits the + // monitoring data which is used for SLO reporting of this Service. + // Documentation and valid values for given service types here + // (https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). + ServiceLabels map[string]string `json:"serviceLabels,omitempty"` + + // ServiceType: The type of service that this basic service defines, + // e.g. APP_ENGINE service type. Documentation and valid values here + // (https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). + ServiceType string `json:"serviceType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ServiceLabels") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ServiceLabels") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BasicService) MarshalJSON() ([]byte, error) { + type NoMethod BasicService + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BasicSli: An SLI measuring performance on a well-known service type. // Performance will be computed on the basis of pre-defined metrics. The // type of the service_resource determines the metrics to use and the @@ -830,10 +965,10 @@ type BasicSli struct { // ForceSendFields is a list of field names (e.g. "Availability") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Availability") to include @@ -878,10 +1013,10 @@ type BucketOptions struct { // ForceSendFields is a list of field names (e.g. "ExplicitBuckets") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ExplicitBuckets") to @@ -905,16 +1040,16 @@ func (s *BucketOptions) MarshalJSON() ([]byte, error) { type CloudEndpoints struct { // Service: The name of the Cloud Endpoints service underlying this // service. Corresponds to the service resource label in the api - // monitored resource: - // https://cloud.google.com/monitoring/api/resources#tag_api + // monitored resource + // (https://cloud.google.com/monitoring/api/resources#tag_api). Service string `json:"service,omitempty"` // ForceSendFields is a list of field names (e.g. "Service") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Service") to include in @@ -932,8 +1067,46 @@ func (s *CloudEndpoints) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CloudRun: Cloud Run service. Learn more at +// https://cloud.google.com/run. +type CloudRun struct { + // Location: The location the service is run. Corresponds to the + // location resource label in the cloud_run_revision monitored resource + // (https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision). + Location string `json:"location,omitempty"` + + // ServiceName: The name of the Cloud Run service. Corresponds to the + // service_name resource label in the cloud_run_revision monitored + // resource + // (https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision). + ServiceName string `json:"serviceName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Location") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Location") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *CloudRun) MarshalJSON() ([]byte, error) { + type NoMethod CloudRun + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ClusterIstio: Istio service scoped to a single Kubernetes cluster. -// Learn more at http://istio.io. +// Learn more at https://istio.io. Clusters running OSS Istio will have +// their services ingested as this type. type ClusterIstio struct { // ClusterName: The name of the Kubernetes cluster in which this Istio // service is defined. Corresponds to the cluster_name resource label in @@ -957,10 +1130,10 @@ type ClusterIstio struct { // ForceSendFields is a list of field names (e.g. "ClusterName") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ClusterName") to include @@ -1004,15 +1177,15 @@ type CollectdPayload struct { TypeInstance string `json:"typeInstance,omitempty"` // Values: The measured values during this time interval. Each value - // must have a different dataSourceName. + // must have a different data_source_name. Values []*CollectdValue `json:"values,omitempty"` // ForceSendFields is a list of field names (e.g. "EndTime") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "EndTime") to include in @@ -1048,10 +1221,10 @@ type CollectdPayloadError struct { // ForceSendFields is a list of field names (e.g. "Error") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. 
"Error") to include in API @@ -1071,7 +1244,7 @@ func (s *CollectdPayloadError) MarshalJSON() ([]byte, error) { // CollectdValue: A single data point from a collectd-based plugin. type CollectdValue struct { - // DataSourceName: The data source for the collectd value. For example + // DataSourceName: The data source for the collectd value. For example, // there are two data sources for network measurements: "rx" and "tx". DataSourceName string `json:"dataSourceName,omitempty"` @@ -1096,10 +1269,10 @@ type CollectdValue struct { // ForceSendFields is a list of field names (e.g. "DataSourceName") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DataSourceName") to @@ -1130,10 +1303,10 @@ type CollectdValueError struct { // ForceSendFields is a list of field names (e.g. "Error") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Error") to include in API @@ -1159,6 +1332,15 @@ type Condition struct { // to receive new data points. ConditionAbsent *MetricAbsence `json:"conditionAbsent,omitempty"` + // ConditionMatchedLog: A condition that checks for log messages + // matching given constraints. If set, no other conditions can be + // present. + ConditionMatchedLog *LogMatch `json:"conditionMatchedLog,omitempty"` + + // ConditionMonitoringQueryLanguage: A condition that uses the + // Monitoring Query Language to define alerts. + ConditionMonitoringQueryLanguage *MonitoringQueryLanguageCondition `json:"conditionMonitoringQueryLanguage,omitempty"` + // ConditionThreshold: A condition that compares a time series against a // threshold. ConditionThreshold *MetricThreshold `json:"conditionThreshold,omitempty"` @@ -1169,14 +1351,12 @@ type Condition struct { DisplayName string `json:"displayName,omitempty"` // Name: Required if the condition exists. The unique resource name for - // this condition. Its format - // is: - // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditio - // ns/[CONDITION_ID] - // [CONDITION_ID] is assigned by Stackdriver Monitoring when the + // this condition. 
Its format is: + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditions/[ + // CONDITION_ID] [CONDITION_ID] is assigned by Cloud Monitoring when the // condition is created as part of a new or updated alerting policy.When // calling the alertPolicies.create method, do not include the name - // field in the conditions of the requested alerting policy. Stackdriver + // field in the conditions of the requested alerting policy. Cloud // Monitoring creates the condition identifiers and includes them in the // new policy.When calling the alertPolicies.update method to update a // policy, including a condition name causes the existing condition to @@ -1190,10 +1370,10 @@ type Condition struct { // ForceSendFields is a list of field names (e.g. "ConditionAbsent") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ConditionAbsent") to @@ -1214,15 +1394,19 @@ func (s *Condition) MarshalJSON() ([]byte, error) { // ContentMatcher: Optional. Used to perform content matching. This // allows matching based on substrings and regular expressions, together -// with their negations. Only the first 4 MB of an HTTP or HTTPS -// check's response (and the first 1 MB of a TCP check's response) -// are examined for purposes of content matching. +// with their negations. Only the first 4 MB of an HTTP or HTTPS check's +// response (and the first 1 MB of a TCP check's response) are examined +// for purposes of content matching. type ContentMatcher struct { - // Content: String or regex content to match. Maximum 1024 bytes. An - // empty content string indicates no content matching is to be + // Content: String, regex or JSON content to match. Maximum 1024 bytes. + // An empty content string indicates no content matching is to be // performed. Content string `json:"content,omitempty"` + // JsonPathMatcher: Matcher information for MATCHES_JSON_PATH and + // NOT_MATCHES_JSON_PATH + JsonPathMatcher *JsonPathMatcher `json:"jsonPathMatcher,omitempty"` + // Matcher: The type of content matcher that will be applied to the // server output, compared to the content string when the check is run. // @@ -1237,19 +1421,28 @@ type ContentMatcher struct { // "NOT_CONTAINS_STRING" - Selects negation of substring matching. The // match succeeds if the output does NOT contain the content string. // "MATCHES_REGEX" - Selects regular-expression matching. The match - // succeeds of the output matches the regular expression specified in - // the content string. + // succeeds if the output matches the regular expression specified in + // the content string. Regex matching is only supported for HTTP/HTTPS + // checks. // "NOT_MATCHES_REGEX" - Selects negation of regular-expression // matching. The match succeeds if the output does NOT match the regular - // expression specified in the content string. + // expression specified in the content string. 
Regex matching is only + // supported for HTTP/HTTPS checks. + // "MATCHES_JSON_PATH" - Selects JSONPath matching. See + // JsonPathMatcher for details on when the match succeeds. JSONPath + // matching is only supported for HTTP/HTTPS checks. + // "NOT_MATCHES_JSON_PATH" - Selects JSONPath matching. See + // JsonPathMatcher for details on when the match succeeds. Succeeds when + // output does NOT match as specified. JSONPath is only supported for + // HTTP/HTTPS checks. Matcher string `json:"matcher,omitempty"` // ForceSendFields is a list of field names (e.g. "Content") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Content") to include in @@ -1285,10 +1478,10 @@ type CreateCollectdTimeSeriesRequest struct { // ForceSendFields is a list of field names (e.g. "CollectdPayloads") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "CollectdPayloads") to @@ -1329,10 +1522,10 @@ type CreateCollectdTimeSeriesResponse struct { // ForceSendFields is a list of field names (e.g. "PayloadErrors") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "PayloadErrors") to include @@ -1363,10 +1556,10 @@ type CreateTimeSeriesRequest struct { // ForceSendFields is a list of field names (e.g. "TimeSeries") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
+ // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "TimeSeries") to include in @@ -1400,10 +1593,10 @@ type CreateTimeSeriesSummary struct { // ForceSendFields is a list of field names (e.g. "Errors") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Errors") to include in API @@ -1421,8 +1614,43 @@ func (s *CreateTimeSeriesSummary) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Custom: Custom view of service telemetry. Currently a place-holder -// pending final design. +// Criteria: Criteria specific to the AlertPolicys that this Snooze +// applies to. The Snooze will suppress alerts that come from one of the +// AlertPolicys whose names are supplied. +type Criteria struct { + // Policies: The specific AlertPolicy names for the alert that should be + // snoozed. The format is: + // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] There is a + // limit of 16 policies per snooze. This limit is checked during snooze + // creation. + Policies []string `json:"policies,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Policies") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Policies") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Criteria) MarshalJSON() ([]byte, error) { + type NoMethod Criteria + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Custom: Use a custom service to designate a service that you want to +// monitor when none of the other service types (like App Engine, Cloud +// Run, or a GKE type) matches your intended service. 
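Criteria is consumed by the Snoozes service wired up earlier in this file. A tentative sketch, assuming the Snooze message in this package version carries Criteria, Interval, and DisplayName fields (policy ID and times are invented):

	snooze := &monitoring.Snooze{
		DisplayName: "Maintenance window", // hypothetical
		Criteria: &monitoring.Criteria{
			Policies: []string{"projects/my-project/alertPolicies/12345"}, // at most 16
		},
		Interval: &monitoring.TimeInterval{
			StartTime: "2023-11-01T00:00:00Z",
			EndTime:   "2023-11-01T04:00:00Z",
		},
	}
	created, err := svc.Projects.Snoozes.Create("projects/my-project", snooze).Do()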
type Custom struct { } @@ -1472,20 +1700,18 @@ type Distribution struct { Range *Range `json:"range,omitempty"` // SumOfSquaredDeviation: The sum of squared deviations from the mean of - // the values in the population. For values x_i this - // is: - // Sum[i=1..n]((x_i - mean)^2) - // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd - // edition describes Welford's method for accumulating this sum in one - // pass.If count is zero then this field must be zero. + // the values in the population. For values x_i this is: Sum[i=1..n] ((x_i + // - mean)^2) Knuth, "The Art of Computer Programming", Vol. 2, page + // 232, 3rd edition describes Welford's method for accumulating this sum + // in one pass. If count is zero then this field must be zero. SumOfSquaredDeviation float64 `json:"sumOfSquaredDeviation,omitempty"` // ForceSendFields is a list of field names (e.g. "BucketCounts") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BucketCounts") to include @@ -1521,15 +1747,15 @@ func (s *Distribution) UnmarshalJSON(data []byte) error { // DistributionCut: A DistributionCut defines a TimeSeries and // thresholds used for measuring good service and total service. The -// TimeSeries must have ValueType = -// DISTRIBUTION and MetricKind = DELTA or MetricKind = CUMULATIVE. The -// computed good_service will be the count of values x in the -// Distribution such that range.min <= x < range.max. +// TimeSeries must have ValueType = DISTRIBUTION and MetricKind = DELTA +// or MetricKind = CUMULATIVE. The computed good_service will be the +// estimated count of values in the Distribution that fall within the +// specified min and max. type DistributionCut struct { // DistributionFilter: A monitoring filter // (https://cloud.google.com/monitoring/api/v3/filters) specifying a - // TimeSeries aggregating values. Must have ValueType = - // DISTRIBUTION and MetricKind = DELTA or MetricKind = CUMULATIVE. + // TimeSeries aggregating values. Must have ValueType = DISTRIBUTION and + // MetricKind = DELTA or MetricKind = CUMULATIVE. DistributionFilter string `json:"distributionFilter,omitempty"` // Range: Range of values considered "good." For a one-sided range, set @@ -1538,10 +1764,10 @@ type DistributionCut struct { // ForceSendFields is a list of field names (e.g. "DistributionFilter") // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. 
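Since the SumOfSquaredDeviation comment above cites Welford's method, here is a small self-contained sketch of that one-pass accumulation (plain Go, independent of the generated client):

	// welford accumulates count, mean, and Sum[i=1..n]((x_i - mean)^2)
	// in a single pass (Knuth, Vol. 2, 3rd ed., p. 232).
	type welford struct {
		count int64
		mean  float64
		m2    float64 // the sum of squared deviations
	}

	func (w *welford) add(x float64) {
		w.count++
		delta := x - w.mean
		w.mean += delta / float64(w.count)
		w.m2 += delta * (x - w.mean) // delta against the updated mean
	}

If count is zero, m2 stays zero, matching the field contract above.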
+ // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DistributionFilter") to @@ -1563,10 +1789,11 @@ func (s *DistributionCut) MarshalJSON() ([]byte, error) { // Documentation: A content string and a MIME type that describes the // content string's format. type Documentation struct { - // Content: The text of the documentation, interpreted according to + // Content: The body of the documentation, interpreted according to // mime_type. The content may not exceed 8,192 Unicode characters and // may not exceed more than 10,240 bytes when encoded in UTF-8 format, - // whichever is smaller. + // whichever is smaller. This text can be templatized by using variables + // (https://cloud.google.com/monitoring/alerts/doc-variables). Content string `json:"content,omitempty"` // MimeType: The format of the content field. Presently, only the value @@ -1576,10 +1803,10 @@ type Documentation struct { // ForceSendFields is a list of field names (e.g. "Content") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Content") to include in @@ -1618,10 +1845,10 @@ type DroppedLabels struct { // ForceSendFields is a list of field names (e.g. "Label") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Label") to include in API @@ -1642,12 +1869,8 @@ func (s *DroppedLabels) MarshalJSON() ([]byte, error) { // Empty: A generic empty message that you can re-use to avoid defining // duplicated empty messages in your APIs. A typical example is to use // it as the request or the response type of an API method. For -// instance: -// service Foo { -// rpc Bar(google.protobuf.Empty) returns -// (google.protobuf.Empty); -// } -// The JSON representation for Empty is empty JSON object {}. +// instance: service Foo { rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); } type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. @@ -1665,10 +1888,10 @@ type Error struct { // ForceSendFields is a list of field names (e.g. "PointCount") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "PointCount") to include in @@ -1712,10 +1935,10 @@ type Exemplar struct { // ForceSendFields is a list of field names (e.g. "Attachments") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Attachments") to include @@ -1749,7 +1972,7 @@ func (s *Exemplar) UnmarshalJSON(data []byte) error { // Explicit: Specifies a set of buckets with arbitrary widths. There are // size(bounds) + 1 (= N) buckets. Bucket i has the following -// boundaries:Upper bound (0 <= i < N-1): boundsi Lower bound (1 <= i < +// boundaries:Upper bound (0 <= i < N-1): bounds[i]. Lower bound (1 <= i < // N): bounds[i - 1]. The bounds field must contain at least one element. If // bounds has only one element, then there are no finite buckets, and // that single element is the common boundary of the overflow and @@ -1760,10 +1983,10 @@ type Explicit struct { // ForceSendFields is a list of field names (e.g. "Bounds") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Bounds") to include in API @@ -1786,8 +2009,8 @@ // bucket represents a constant relative uncertainty on a specific value // in the bucket. There are num_finite_buckets + 2 (= N) buckets. Bucket // i has the following boundaries:Upper bound (0 <= i < N-1): scale * -// (growth_factor ^ i). Lower bound (1 <= i < N): scale * -// (growth_factor ^ (i - 1)). +// (growth_factor ^ i). Lower bound (1 <= i < N): scale * (growth_factor +// ^ (i - 1)). type Exponential struct { // GrowthFactor: Must be greater than 1. 
GrowthFactor float64 `json:"growthFactor,omitempty"` @@ -1800,10 +2023,10 @@ type Exponential struct { // ForceSendFields is a list of field names (e.g. "GrowthFactor") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "GrowthFactor") to include @@ -1904,10 +2127,10 @@ type Field struct { // ForceSendFields is a list of field names (e.g. "Cardinality") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Cardinality") to include @@ -1925,6 +2148,40 @@ func (s *Field) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ForecastOptions: Options used when forecasting the time series and +// testing the predicted value against the threshold. +type ForecastOptions struct { + // ForecastHorizon: Required. The length of time into the future to + // forecast whether a time series will violate the threshold. If the + // predicted value is found to violate the threshold, and the violation + // is observed in all forecasts made for the configured duration, then + // the time series is considered to be failing. + ForecastHorizon string `json:"forecastHorizon,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ForecastHorizon") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ForecastHorizon") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *ForecastOptions) MarshalJSON() ([]byte, error) { + type NoMethod ForecastOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GetNotificationChannelVerificationCodeRequest: The // GetNotificationChannelVerificationCode request. type GetNotificationChannelVerificationCodeRequest struct { @@ -1942,10 +2199,10 @@ type GetNotificationChannelVerificationCodeRequest struct { // ForceSendFields is a list of field names (e.g. "ExpireTime") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ExpireTime") to include in @@ -1984,10 +2241,10 @@ type GetNotificationChannelVerificationCodeResponse struct { // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Code") to include in API @@ -2005,10 +2262,147 @@ func (s *GetNotificationChannelVerificationCodeResponse) MarshalJSON() ([]byte, return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// GoogleMonitoringV3Range: Range of numerical values, inclusive of min -// and exclusive of max. If the open range "< range.max" is desired, set -// range.min = -infinity. If the open range ">= range.min" is desired, -// set range.max = infinity. +// GkeNamespace: GKE Namespace. The field names correspond to the +// resource metadata labels on monitored resources that fall under a +// namespace (for example, k8s_container or k8s_pod). +type GkeNamespace struct { + // ClusterName: The name of the parent cluster. + ClusterName string `json:"clusterName,omitempty"` + + // Location: The location of the parent cluster. This may be a zone or + // region. + Location string `json:"location,omitempty"` + + // NamespaceName: The name of this namespace. + NamespaceName string `json:"namespaceName,omitempty"` + + // ProjectId: Output only. The project this resource lives in. For + // legacy services migrated from the Custom type, this may be a distinct + // project from the one parenting the service itself. + ProjectId string `json:"projectId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterName") to + // unconditionally include in API requests. 
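The "type NoMethod X; raw := NoMethod(*s)" pattern in every generated MarshalJSON above deserves a word: a new named type does not inherit the original type's methods, so re-marshaling it cannot recurse back into MarshalJSON. A standalone sketch of the same trick:

package main

import (
	"encoding/json"
	"fmt"
)

type Thing struct {
	Name string `json:"name,omitempty"`
}

func (t *Thing) MarshalJSON() ([]byte, error) {
	// Marshaling t directly here would call this method again and recurse
	// until the stack overflows. NoMethod has the same fields but an empty
	// method set, so json.Marshal falls back to plain struct encoding.
	type NoMethod Thing
	raw := NoMethod(*t)
	return json.Marshal(raw)
}

func main() {
	b, _ := json.Marshal(&Thing{Name: "example"})
	fmt.Println(string(b)) // {"name":"example"}
}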
By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClusterName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GkeNamespace) MarshalJSON() ([]byte, error) { + type NoMethod GkeNamespace + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GkeService: GKE Service. The "service" here represents a Kubernetes +// service object +// (https://kubernetes.io/docs/concepts/services-networking/service). +// The field names correspond to the resource labels on k8s_service +// monitored resources +// (https://cloud.google.com/monitoring/api/resources#tag_k8s_service). +type GkeService struct { + // ClusterName: The name of the parent cluster. + ClusterName string `json:"clusterName,omitempty"` + + // Location: The location of the parent cluster. This may be a zone or + // region. + Location string `json:"location,omitempty"` + + // NamespaceName: The name of the parent namespace. + NamespaceName string `json:"namespaceName,omitempty"` + + // ProjectId: Output only. The project this resource lives in. For + // legacy services migrated from the Custom type, this may be a distinct + // project from the one parenting the service itself. + ProjectId string `json:"projectId,omitempty"` + + // ServiceName: The name of this service. + ServiceName string `json:"serviceName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClusterName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GkeService) MarshalJSON() ([]byte, error) { + type NoMethod GkeService + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GkeWorkload: A GKE Workload (Deployment, StatefulSet, etc). The field +// names correspond to the metadata labels on monitored resources that +// fall under a workload (for example, k8s_container or k8s_pod). +type GkeWorkload struct { + // ClusterName: The name of the parent cluster. + ClusterName string `json:"clusterName,omitempty"` + + // Location: The location of the parent cluster. 
This may be a zone or + // region. + Location string `json:"location,omitempty"` + + // NamespaceName: The name of the parent namespace. + NamespaceName string `json:"namespaceName,omitempty"` + + // ProjectId: Output only. The project this resource lives in. For + // legacy services migrated from the Custom type, this may be a distinct + // project from the one parenting the service itself. + ProjectId string `json:"projectId,omitempty"` + + // TopLevelControllerName: The name of this workload. + TopLevelControllerName string `json:"topLevelControllerName,omitempty"` + + // TopLevelControllerType: The type of this workload (for example, + // "Deployment" or "DaemonSet") + TopLevelControllerType string `json:"topLevelControllerType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClusterName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GkeWorkload) MarshalJSON() ([]byte, error) { + type NoMethod GkeWorkload + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleMonitoringV3Range: Range of numerical values within min and +// max. type GoogleMonitoringV3Range struct { // Max: Range maximum. Max float64 `json:"max,omitempty"` @@ -2018,10 +2412,10 @@ type GoogleMonitoringV3Range struct { // ForceSendFields is a list of field names (e.g. "Max") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Max") to include in API @@ -2092,18 +2486,16 @@ type Group struct { // are clusters. IsCluster bool `json:"isCluster,omitempty"` - // Name: Output only. The name of this group. The format - // is: - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - // When creating a group, this field is ignored and a new name is - // created consisting of the project specified in the call to - // CreateGroup and a unique [GROUP_ID] that is generated automatically. + // Name: Output only. The name of this group. 
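NullFields is the counterpart to ForceSendFields: instead of sending a zero value, it sends an explicit JSON null. A minimal sketch using one of the types defined above, again assuming the package imports as google.golang.org/api/monitoring/v3:

package main

import (
	"fmt"

	monitoring "google.golang.org/api/monitoring/v3"
)

func main() {
	w := &monitoring.GkeWorkload{
		ClusterName: "prod-cluster",
		// Location is empty, as NullFields requires; it will be emitted
		// as an explicit null rather than omitted.
		NullFields: []string{"Location"},
	}
	b, _ := w.MarshalJSON()
	fmt.Println(string(b)) // roughly {"clusterName":"prod-cluster","location":null}
}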
The format is: + // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] When creating a + // group, this field is ignored and a new name is created consisting of + // the project specified in the call to CreateGroup and a unique + // [GROUP_ID] that is generated automatically. Name string `json:"name,omitempty"` // ParentName: The name of the group's parent, if it has one. The format - // is: - // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] - // For groups with no parent, parent_name is the empty string, "". + // is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] For groups with + // no parent, parent_name is the empty string, "". ParentName string `json:"parentName,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2112,10 +2504,10 @@ type Group struct { // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DisplayName") to include @@ -2136,6 +2528,12 @@ func (s *Group) MarshalJSON() ([]byte, error) { // HttpCheck: Information involved in an HTTP/HTTPS Uptime check // request. type HttpCheck struct { + // AcceptedResponseStatusCodes: If present, the check will only pass if + // the HTTP response status code is in this set of status codes. If + // empty, the HTTP status code will only pass if the HTTP status code is + // 200-299. + AcceptedResponseStatusCodes []*ResponseStatusCode `json:"acceptedResponseStatusCodes,omitempty"` + // AuthInfo: The authentication information. Optional when creating an // HTTP check; defaults to empty. AuthInfo *BasicAuthentication `json:"authInfo,omitempty"` @@ -2145,22 +2543,36 @@ type HttpCheck struct { // Users can provide a Content-Length header via the headers field or // the API will do so. If the request_method is GET and body is not // empty, the API will return an error. The maximum byte size is 1 - // megabyte. Note: As with all bytes fields JSON representations are - // base64 encoded. e.g.: "foo=bar" in URL-encoded form is "foo%3Dbar" - // and in base64 encoding is "Zm9vJTI1M0RiYXI=". + // megabyte.Note: If client libraries aren't used (which performs the + // conversion automatically) base64 encode your body data since the + // field is of bytes type. Body string `json:"body,omitempty"` - // ContentType: The content type to use for the check. + // ContentType: The content type header to use for the check. The + // following configurations result in errors: 1. Content type is + // specified in both the headers field and the content_type field. 2. + // Request method is GET and content_type is not TYPE_UNSPECIFIED 3. + // Request method is POST and content_type is TYPE_UNSPECIFIED. 4. + // Request method is POST and a "Content-Type" header is provided via + // headers field. The content_type field should be used instead. // // Possible values: - // "TYPE_UNSPECIFIED" - No content type specified. 
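The HttpCheck.Body note above says that callers bypassing the client libraries must base64-encode the body themselves, since the field is a bytes type on the wire. A standalone sketch of that conversion:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	body := "foo=bar" // a URL-encoded form payload for a POST check
	encoded := base64.StdEncoding.EncodeToString([]byte(body))
	fmt.Println(encoded) // Zm9vPWJhcg==
}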
If the request - // method is POST, an unspecified content type results in a check - // creation rejection. + // "TYPE_UNSPECIFIED" - No content type specified. // "URL_ENCODED" - body is in URL-encoded form. Equivalent to setting // the Content-Type to application/x-www-form-urlencoded in the HTTP // request. + // "USER_PROVIDED" - body is in custom_content_type form. Equivalent + // to setting the Content-Type to the contents of custom_content_type in + // the HTTP request. ContentType string `json:"contentType,omitempty"` + // CustomContentType: A user provided content type header to use for the + // check. The invalid configurations outlined in the content_type field + // apply to custom_content_type, as well as the following: 1. + // content_type is URL_ENCODED and custom_content_type is set. 2. + // content_type is USER_PROVIDED and custom_content_type is not set. + CustomContentType string `json:"customContentType,omitempty"` + // Headers: The list of headers to send as part of the Uptime check // request. If two headers have the same key and different values, they // should be entered as a single header, with the value being a @@ -2186,6 +2598,10 @@ type HttpCheck struct { // automatically. Path string `json:"path,omitempty"` + // PingConfig: Contains information needed to add pings to an HTTP + // check. + PingConfig *PingConfig `json:"pingConfig,omitempty"` + // Port: Optional (defaults to 80 when use_ssl is false, and 443 when // use_ssl is true). The TCP port on the HTTP server against which to // run the check. Will be combined with host (specified within the @@ -2210,20 +2626,22 @@ type HttpCheck struct { // setting validate_ssl to true has no effect. ValidateSsl bool `json:"validateSsl,omitempty"` - // ForceSendFields is a list of field names (e.g. "AuthInfo") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "AcceptedResponseStatusCodes") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AuthInfo") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. + // "AcceptedResponseStatusCodes") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. 
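The Headers note above requires that duplicate header keys be collapsed into a single entry whose value is a comma-separated list. A standalone sketch of that normalization (the helper name is the editor's own):

package main

import "fmt"

// mergeHeaders collapses repeated header keys into one comma-separated
// value, the form the Uptime check Headers field expects.
func mergeHeaders(pairs [][2]string) map[string]string {
	merged := make(map[string]string)
	for _, p := range pairs {
		if existing, ok := merged[p[0]]; ok {
			merged[p[0]] = existing + "," + p[1]
		} else {
			merged[p[0]] = p[1]
		}
	}
	return merged
}

func main() {
	h := mergeHeaders([][2]string{
		{"Accept", "text/html"},
		{"Accept", "application/json"},
	})
	fmt.Println(h["Accept"]) // text/html,application/json
}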
NullFields []string `json:"-"` } @@ -2237,8 +2655,8 @@ func (s *HttpCheck) MarshalJSON() ([]byte, error) { // private/internal GCP resources. type InternalChecker struct { // DisplayName: The checker's human-readable name. The display name - // should be unique within a Stackdriver Workspace in order to make it - // easier to identify; however, uniqueness is not enforced. + // should be unique within a Cloud Monitoring Metrics Scope in order to + // make it easier to identify; however, uniqueness is not enforced. DisplayName string `json:"displayName,omitempty"` // GcpZone: The GCP zone the Uptime check should egress from. Only @@ -2246,12 +2664,10 @@ type InternalChecker struct { // specified. GcpZone string `json:"gcpZone,omitempty"` - // Name: A unique resource name for this InternalChecker. The format - // is: - // projects/[PROJECT_ID_OR_NUMBER]/internalCheckers/[INTERNAL_CHECKER - // _ID] - // [PROJECT_ID_OR_NUMBER] is the Stackdriver Workspace project for the - // Uptime check config associated with the internal checker. + // Name: A unique resource name for this InternalChecker. The format is: + // projects/[PROJECT_ID_OR_NUMBER]/internalCheckers/[INTERNAL_CHECKER_ID] + // [PROJECT_ID_OR_NUMBER] is the Cloud Monitoring Metrics Scope project + // for the Uptime check config associated with the internal checker. Name string `json:"name,omitempty"` // Network: The GCP VPC network (https://cloud.google.com/vpc/docs/vpc) @@ -2259,7 +2675,7 @@ type InternalChecker struct { Network string `json:"network,omitempty"` // PeerProjectId: The GCP project ID where the internal checker lives. - // Not necessary the same as the Workspace project. + // Not necessary the same as the Metrics Scope project. PeerProjectId string `json:"peerProjectId,omitempty"` // State: The current operational state of the internal checker. @@ -2271,23 +2687,22 @@ type InternalChecker struct { // configured. A checker in this state can be returned by // ListInternalCheckers or GetInternalChecker, as well as by examining // the long running Operation - // (https://cloud.google.com/apis/design/design_patterns#long_running_ope - // rations) that created it. + // (https://cloud.google.com/apis/design/design_patterns#long_running_operations) + // that created it. // "RUNNING" - The checker is running and available for use. A checker // in this state can be returned by ListInternalCheckers or // GetInternalChecker as well as by examining the long running Operation - // (https://cloud.google.com/apis/design/design_patterns#long_running_ope - // rations) that created it. If a checker is being torn down, it is - // neither visible nor usable, so there is no "deleting" or "down" - // state. + // (https://cloud.google.com/apis/design/design_patterns#long_running_operations) + // that created it. If a checker is being torn down, it is neither + // visible nor usable, so there is no "deleting" or "down" state. State string `json:"state,omitempty"` // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DisplayName") to include @@ -2305,17 +2720,106 @@ func (s *InternalChecker) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// IstioCanonicalService: Canonical service scoped to an Istio mesh. +// Anthos clusters running ASM >= 1.6.8 will have their services +// ingested as this type. +type IstioCanonicalService struct { + // CanonicalService: The name of the canonical service underlying this + // service. Corresponds to the destination_canonical_service_name metric + // label in label in Istio metrics + // (https://cloud.google.com/monitoring/api/metrics_istio). + CanonicalService string `json:"canonicalService,omitempty"` + + // CanonicalServiceNamespace: The namespace of the canonical service + // underlying this service. Corresponds to the + // destination_canonical_service_namespace metric label in Istio metrics + // (https://cloud.google.com/monitoring/api/metrics_istio). + CanonicalServiceNamespace string `json:"canonicalServiceNamespace,omitempty"` + + // MeshUid: Identifier for the Istio mesh in which this canonical + // service is defined. Corresponds to the mesh_uid metric label in Istio + // metrics (https://cloud.google.com/monitoring/api/metrics_istio). + MeshUid string `json:"meshUid,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CanonicalService") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CanonicalService") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *IstioCanonicalService) MarshalJSON() ([]byte, error) { + type NoMethod IstioCanonicalService + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// JsonPathMatcher: Information needed to perform a JSONPath content +// match. Used for ContentMatcherOption::MATCHES_JSON_PATH and +// ContentMatcherOption::NOT_MATCHES_JSON_PATH. +type JsonPathMatcher struct { + // JsonMatcher: The type of JSONPath match that will be applied to the + // JSON output (ContentMatcher.content) + // + // Possible values: + // "JSON_PATH_MATCHER_OPTION_UNSPECIFIED" - No JSONPath matcher type + // specified (not valid). + // "EXACT_MATCH" - Selects 'exact string' matching. The match succeeds + // if the content at the json_path within the output is exactly the same + // as the content string. + // "REGEX_MATCH" - Selects regular-expression matching. 
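The two JsonMatcher modes above reduce to an exact string comparison and a regular-expression test. A standalone sketch of those semantics, assuming the value at the configured json_path has already been extracted from the response (the JSONPath extraction itself is out of scope here):

package main

import (
	"fmt"
	"regexp"
)

func matches(jsonMatcher, extracted, content string) bool {
	switch jsonMatcher {
	case "EXACT_MATCH":
		// The extracted value must equal the content string exactly.
		return extracted == content
	case "REGEX_MATCH":
		// The content string is treated as a regular expression.
		ok, _ := regexp.MatchString(content, extracted)
		return ok
	}
	return false // JSON_PATH_MATCHER_OPTION_UNSPECIFIED is not valid
}

func main() {
	fmt.Println(matches("EXACT_MATCH", "ok", "ok"))                   // true
	fmt.Println(matches("REGEX_MATCH", "status-200", `^status-\d+$`)) // true
}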
The match + // succeeds if the content at the json_path within the output matches + // the regular expression specified in the content string. + JsonMatcher string `json:"jsonMatcher,omitempty"` + + // JsonPath: JSONPath within the response output pointing to the + // expected ContentMatcher::content to match against. + JsonPath string `json:"jsonPath,omitempty"` + + // ForceSendFields is a list of field names (e.g. "JsonMatcher") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "JsonMatcher") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *JsonPathMatcher) MarshalJSON() ([]byte, error) { + type NoMethod JsonPathMatcher + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // LabelDescriptor: A description of a label. type LabelDescriptor struct { // Description: A human-readable description for the label. Description string `json:"description,omitempty"` // Key: The key for this label. The key must meet the following - // criteria: - // Does not exceed 100 characters. - // Matches the following regular expression: [a-zA-Z][a-zA-Z0-9_]* - // The first character must be an upper- or lower-case letter. - // The remaining characters must be letters, digits, or underscores. + // criteria: Does not exceed 100 characters. Matches the following + // regular expression: [a-zA-Z][a-zA-Z0-9_]* The first character must be + // an upper- or lower-case letter. The remaining characters must be + // letters, digits, or underscores. Key string `json:"key,omitempty"` // ValueType: The type of data that can be assigned to the label. @@ -2329,10 +2833,10 @@ type LabelDescriptor struct { // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Description") to include @@ -2363,10 +2867,10 @@ type LabelValue struct { // ForceSendFields is a list of field names (e.g. "BoolValue") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
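The LabelDescriptor key criteria above translate directly into a check: at most 100 characters, the given regular expression, first character a letter, remainder letters, digits, or underscores. A standalone sketch (the helper name is the editor's own):

package main

import (
	"fmt"
	"regexp"
)

// labelKeyRE is the documented pattern [a-zA-Z][a-zA-Z0-9_]*, anchored.
var labelKeyRE = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9_]*$`)

func validLabelKey(key string) bool {
	return len(key) <= 100 && labelKeyRE.MatchString(key)
}

func main() {
	fmt.Println(validLabelKey("response_code")) // true
	fmt.Println(validLabelKey("2fast"))         // false: must start with a letter
}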
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BoolValue") to include in @@ -2392,10 +2896,10 @@ type LatencyCriteria struct { // ForceSendFields is a list of field names (e.g. "Threshold") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Threshold") to include in @@ -2418,7 +2922,7 @@ func (s *LatencyCriteria) MarshalJSON() ([]byte, error) { // constant absolute uncertainty on the specific value in the // bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has // the following boundaries:Upper bound (0 <= i < N-1): offset + (width -// * i). Lower bound (1 <= i < N): offset + (width * (i - 1)). +// * i). Lower bound (1 <= i < N): offset + (width * (i - 1)). type Linear struct { // NumFiniteBuckets: Must be greater than 0. NumFiniteBuckets int64 `json:"numFiniteBuckets,omitempty"` @@ -2431,10 +2935,10 @@ type Linear struct { // ForceSendFields is a list of field names (e.g. "NumFiniteBuckets") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "NumFiniteBuckets") to @@ -2492,10 +2996,10 @@ type ListAlertPoliciesResponse struct { // ForceSendFields is a list of field names (e.g. "AlertPolicies") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. 
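The Linear bucket boundaries documented above, as standalone arithmetic mirroring the Explicit and Exponential sketch earlier in this file:

package main

import (
	"fmt"
	"math"
)

// linearBounds returns bucket i's (lower, upper) boundary for a Linear
// distribution with num_finite_buckets + 2 (= N) buckets: upper bound
// offset + width*i, lower bound offset + width*(i-1).
func linearBounds(offset, width float64, numFinite, i int) (lo, hi float64) {
	n := numFinite + 2
	lo, hi = math.Inf(-1), math.Inf(1)
	if i >= 1 {
		lo = offset + width*float64(i-1)
	}
	if i < n-1 {
		hi = offset + width*float64(i)
	}
	return lo, hi
}

func main() {
	for i := 0; i < 4; i++ {
		lo, hi := linearBounds(0, 10, 2, i)
		fmt.Printf("bucket %d: [%g, %g)\n", i, lo, hi)
	}
}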
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AlertPolicies") to include @@ -2533,10 +3037,10 @@ type ListGroupMembersResponse struct { // ForceSendFields is a list of field names (e.g. "Members") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Members") to include in @@ -2571,10 +3075,10 @@ type ListGroupsResponse struct { // ForceSendFields is a list of field names (e.g. "Group") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Group") to include in API @@ -2610,10 +3114,10 @@ type ListMetricDescriptorsResponse struct { // ForceSendFields is a list of field names (e.g. "MetricDescriptors") // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "MetricDescriptors") to @@ -2651,10 +3155,10 @@ type ListMonitoredResourceDescriptorsResponse struct { // ForceSendFields is a list of field names (e.g. "NextPageToken") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. 
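All of the List*Response types in this file share one pagination contract: keep passing NextPageToken back until it comes back empty. A sketch of that loop against a hypothetical listPage helper (a stand-in for any of the generated List calls, not part of this package):

package main

import "fmt"

type page struct {
	Items         []string
	NextPageToken string
}

// listPage fakes a two-page listing; real code would issue the API call
// with the given page token.
func listPage(token string) page {
	if token == "" {
		return page{Items: []string{"a", "b"}, NextPageToken: "p2"}
	}
	return page{Items: []string{"c"}} // empty NextPageToken: no more pages
}

func main() {
	var all []string
	for token := ""; ; {
		p := listPage(token)
		all = append(all, p.Items...)
		if p.NextPageToken == "" {
			break
		}
		token = p.NextPageToken
	}
	fmt.Println(all) // [a b c]
}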
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "NextPageToken") to include @@ -2691,10 +3195,10 @@ type ListNotificationChannelDescriptorsResponse struct { // ForceSendFields is a list of field names (e.g. "ChannelDescriptors") // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ChannelDescriptors") to @@ -2737,10 +3241,10 @@ type ListNotificationChannelsResponse struct { // ForceSendFields is a list of field names (e.g. "NextPageToken") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "NextPageToken") to include @@ -2777,10 +3281,10 @@ type ListServiceLevelObjectivesResponse struct { // ForceSendFields is a list of field names (e.g. "NextPageToken") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "NextPageToken") to include @@ -2815,10 +3319,10 @@ type ListServicesResponse struct { // ForceSendFields is a list of field names (e.g. "NextPageToken") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "NextPageToken") to include @@ -2836,13 +3340,51 @@ func (s *ListServicesResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ListTimeSeriesResponse: The ListTimeSeries response. -type ListTimeSeriesResponse struct { - // ExecutionErrors: Query execution errors that may have caused the time - // series data returned to be incomplete. - ExecutionErrors []*Status `json:"executionErrors,omitempty"` +// ListSnoozesResponse: The results of a successful ListSnoozes call, +// containing the matching Snoozes. +type ListSnoozesResponse struct { + // NextPageToken: Page token for repeated calls to ListSnoozes, to fetch + // additional pages of results. If this is empty or missing, there are + // no more pages. + NextPageToken string `json:"nextPageToken,omitempty"` - // NextPageToken: If there are more results than have been returned, + // Snoozes: Snoozes matching this list call. + Snoozes []*Snooze `json:"snoozes,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListSnoozesResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListSnoozesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ListTimeSeriesResponse: The ListTimeSeries response. +type ListTimeSeriesResponse struct { + // ExecutionErrors: Query execution errors that may have caused the time + // series data returned to be incomplete. + ExecutionErrors []*Status `json:"executionErrors,omitempty"` + + // NextPageToken: If there are more results than have been returned, // then this field is set to a non-empty value. To see the additional // results, use that value as page_token in the next call to this // method. @@ -2865,10 +3407,10 @@ type ListTimeSeriesResponse struct { // ForceSendFields is a list of field names (e.g. "ExecutionErrors") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
+ // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ExecutionErrors") to @@ -2910,10 +3452,10 @@ type ListUptimeCheckConfigsResponse struct { // ForceSendFields is a list of field names (e.g. "NextPageToken") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "NextPageToken") to include @@ -2952,10 +3494,10 @@ type ListUptimeCheckIpsResponse struct { // ForceSendFields is a list of field names (e.g. "NextPageToken") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "NextPageToken") to include @@ -2973,7 +3515,54 @@ func (s *ListUptimeCheckIpsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// MeshIstio: Istio service scoped to an Istio mesh +// LogMatch: A condition type that checks whether a log message in the +// scoping project +// (https://cloud.google.com/monitoring/api/v3#project_name) satisfies +// the given filter. Logs from other projects in the metrics scope are +// not evaluated. +type LogMatch struct { + // Filter: Required. A logs-based filter. See Advanced Logs Queries + // (https://cloud.google.com/logging/docs/view/advanced-queries) for how + // this filter should be constructed. + Filter string `json:"filter,omitempty"` + + // LabelExtractors: Optional. A map from a label key to an extractor + // expression, which is used to extract the value for this label key. + // Each entry in this map is a specification for how data should be + // extracted from log entries that match filter. Each combination of + // extracted values is treated as a separate rule for the purposes of + // triggering notifications. 
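A sketch of a LogMatch condition shaped like the fields documented above, using a local struct mirroring the generated type. The filter and extractor expressions follow the Advanced Logs Queries and logs-based metric value_extractor docs linked above; treat them as illustrative, not verified:

package main

import (
	"encoding/json"
	"fmt"
)

type logMatch struct {
	Filter          string            `json:"filter,omitempty"`
	LabelExtractors map[string]string `json:"labelExtractors,omitempty"`
}

func main() {
	m := logMatch{
		// Required: a logs-based filter over the scoping project.
		Filter: `severity>=ERROR AND resource.type="k8s_container"`,
		// Each combination of extracted values becomes a separate rule
		// for triggering notifications.
		LabelExtractors: map[string]string{
			"pod": `EXTRACT(resource.labels.pod_name)`,
		},
	}
	b, _ := json.Marshal(m)
	fmt.Println(string(b))
}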
Label keys and corresponding values can be + // used in notifications generated by this condition.Please see the + // documentation on logs-based metric valueExtractors + // (https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics#LogMetric.FIELDS.value_extractor) + // for syntax and examples. + LabelExtractors map[string]string `json:"labelExtractors,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Filter") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Filter") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LogMatch) MarshalJSON() ([]byte, error) { + type NoMethod LogMatch + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MeshIstio: Istio service scoped to an Istio mesh. Anthos clusters +// running ASM < 1.6.8 will have their services ingested as this type. type MeshIstio struct { // MeshUid: Identifier for the mesh in which this Istio service is // defined. Corresponds to the mesh_uid metric label in Istio metrics. @@ -2991,10 +3580,10 @@ type MeshIstio struct { // ForceSendFields is a list of field names (e.g. "MeshUid") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "MeshUid") to include in @@ -3025,10 +3614,10 @@ type Metric struct { // ForceSendFields is a list of field names (e.g. "Labels") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. 
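The MetricAbsence.Duration rule above (at least 120 seconds, values must be whole minutes, nanos ignored) as a standalone check; the helper name is the editor's own:

package main

import (
	"fmt"
	"time"
)

func validAbsenceDuration(d time.Duration) bool {
	return d >= 120*time.Second && d%time.Minute == 0
}

func main() {
	fmt.Println(validAbsenceDuration(120 * time.Second)) // true
	fmt.Println(validAbsenceDuration(300 * time.Second)) // true
	fmt.Println(validAbsenceDuration(90 * time.Second))  // false: below the minimum
	fmt.Println(validAbsenceDuration(150 * time.Second)) // false: not a whole minute
}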
"Labels") to include in API @@ -3056,31 +3645,31 @@ type MetricAbsence struct { // time series as well as how to combine the retrieved time series // together (such as when aggregating multiple streams on each resource // to a single stream for each resource or when aggregating streams - // across all members of a group of resrouces). Multiple aggregations + // across all members of a group of resources). Multiple aggregations // are applied in the order specified.This field is similar to the one // in the ListTimeSeries request - // (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeS - // eries/list). It is advisable to use the ListTimeSeries method when - // debugging this field. + // (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). + // It is advisable to use the ListTimeSeries method when debugging this + // field. Aggregations []*Aggregation `json:"aggregations,omitempty"` // Duration: The amount of time that a time series must fail to report - // new data to be considered failing. Currently, only values that are a - // multiple of a minute--e.g. 60, 120, or 300 seconds--are supported. If - // an invalid value is given, an error will be returned. The - // Duration.nanos field is ignored. + // new data to be considered failing. The minimum value of this field is + // 120 seconds. Larger values that are a multiple of a minute--for + // example, 240 or 300 seconds--are supported. If an invalid value is + // given, an error will be returned. The Duration.nanos field is + // ignored. Duration string `json:"duration,omitempty"` - // Filter: A filter (https://cloud.google.com/monitoring/api/v3/filters) - // that identifies which time series should be compared with the - // threshold.The filter is similar to the one that is specified in the - // ListTimeSeries request - // (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeS - // eries/list) (that call is useful to verify the time series that will - // be retrieved / processed) and must specify the metric type and - // optionally may contain restrictions on resource type, resource - // labels, and metric labels. This field may not exceed 2048 Unicode - // characters in length. + // Filter: Required. A filter + // (https://cloud.google.com/monitoring/api/v3/filters) that identifies + // which time series should be compared with the threshold.The filter is + // similar to the one that is specified in the ListTimeSeries request + // (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) + // (that call is useful to verify the time series that will be retrieved + // / processed). The filter must specify the metric type and the + // resource type. Optionally, it can specify resource labels and metric + // labels. This field must not exceed 2048 Unicode characters in length. Filter string `json:"filter,omitempty"` // Trigger: The number/percent of time series for which the comparison @@ -3091,10 +3680,10 @@ type MetricAbsence struct { // ForceSendFields is a list of field names (e.g. "Aggregations") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Aggregations") to include @@ -3114,17 +3703,7 @@ func (s *MetricAbsence) MarshalJSON() ([]byte, error) { // MetricDescriptor: Defines a metric type and its schema. Once a metric // descriptor is created, deleting or altering it stops data collection -// and makes the metric type's existing data unusable.The following are -// specific rules for service defined Monitoring metric -// descriptors: -// type, metric_kind, value_type and description fields are all -// required. The unit field must be specified if the value_type is any -// of DOUBLE, INT64, DISTRIBUTION. -// Maximum of default 500 metric descriptors per service is -// allowed. -// Maximum of default 10 labels per metric descriptor is allowed.The -// default maximum limit can be overridden. Please follow -// https://cloud.google.com/monitoring/quotas +// and makes the metric type's existing data unusable. type MetricDescriptor struct { // Description: A detailed description of the metric, which can be used // in documentation. @@ -3138,11 +3717,7 @@ type MetricDescriptor struct { DisplayName string `json:"displayName,omitempty"` // Labels: The set of labels that can be used to describe a specific - // instance of this metric type.The label key name must follow: - // Only upper and lower-case letters, digits and underscores (_) are - // allowed. - // Label name must start with a letter or digit. - // The maximum length of a label name is 100 characters.For example, the + // instance of this metric type. For example, the // appengine.googleapis.com/http/server/response_latencies metric type // has a label for the HTTP response code, response_code, so you can // look at latencies for successful responses or just for responses that @@ -3166,8 +3741,8 @@ type MetricDescriptor struct { // they are cleared for widespread use. By Alpha, all significant design // issues are resolved and we are in the process of verifying // functionality. Alpha customers need to apply for access, agree to - // applicable terms, and have their projects whitelisted. Alpha releases - // don’t have to be feature complete, no SLAs are provided, and there + // applicable terms, and have their projects allowlisted. Alpha releases + // don't have to be feature complete, no SLAs are provided, and there // are no technical support obligations, but they will be far enough // along that customers can actually use them in test environments or // for limited-use tests -- just like they would in normal production @@ -3180,7 +3755,7 @@ type MetricDescriptor struct { // "GA" - GA features are open to all developers and are considered // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. For more information, see the “Deprecation Policy” + // and removed. For more information, see the "Deprecation Policy" // section of our Terms of Service (https://cloud.google.com/terms/) and // the Google Cloud Platform Subject to the Deprecation Policy // (https://cloud.google.com/terms/deprecation) documentation. @@ -3215,108 +3790,63 @@ type MetricDescriptor struct { Name string `json:"name,omitempty"` // Type: The metric type, including its DNS name prefix. 
The type is not - // URL-encoded.All service defined metrics must be prefixed with the - // service name, in the format of {service name}/{relative metric name}, - // such as cloudsql.googleapis.com/database/cpu/utilization. The - // relative metric name must follow: - // Only upper and lower-case letters, digits, '/' and underscores '_' - // are allowed. - // The maximum number of characters allowed for the relative_metric_name - // is 100.All user-defined metric types have the DNS name - // custom.googleapis.com, external.googleapis.com, or - // logging.googleapis.com/user/.Metric types should use a natural - // hierarchical grouping. For - // example: + // URL-encoded. All user-defined metric types have the DNS name + // custom.googleapis.com or external.googleapis.com. Metric types should + // use a natural hierarchical grouping. For example: // "custom.googleapis.com/invoice/paid/amount" - // "external.googlea - // pis.com/prometheus/up" - // "appengine.googleapis.com/http/server/response_ - // latencies" - // + // "external.googleapis.com/prometheus/up" + // "appengine.googleapis.com/http/server/response_latencies" Type string `json:"type,omitempty"` // Unit: The units in which the metric value is reported. It is only // applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. The // unit defines the representation of the stored metric values.Different - // systems may scale the values to be more easily displayed (so a value - // of 0.02KBy might be displayed as 20By, and a value of 3523KBy might - // be displayed as 3.5MBy). However, if the unit is KBy, then the value - // of the metric is always in thousands of bytes, no matter how it may - // be displayed..If you want a custom metric to record the exact number - // of CPU-seconds used by a job, you can create an INT64 CUMULATIVE - // metric whose unit is s{CPU} (or equivalently 1s{CPU} or just s). If - // the job uses 12,005 CPU-seconds, then the value is written as - // 12005.Alternatively, if you want a custom metric to record data in a - // more granular way, you can create a DOUBLE CUMULATIVE metric whose - // unit is ks{CPU}, and then write the value 12.005 (which is - // 12005/1000), or use Kis{CPU} and write 11.723 (which is + // systems might scale the values to be more easily displayed (so a + // value of 0.02kBy might be displayed as 20By, and a value of 3523kBy + // might be displayed as 3.5MBy). However, if the unit is kBy, then the + // value of the metric is always in thousands of bytes, no matter how it + // might be displayed.If you want a custom metric to record the exact + // number of CPU-seconds used by a job, you can create an INT64 + // CUMULATIVE metric whose unit is s{CPU} (or equivalently 1s{CPU} or + // just s). 
If the job uses 12,005 CPU-seconds, then the value is + // written as 12005.Alternatively, if you want a custom metric to record + // data in a more granular way, you can create a DOUBLE CUMULATIVE + // metric whose unit is ks{CPU}, and then write the value 12.005 (which + // is 12005/1000), or use Kis{CPU} and write 11.723 (which is // 12005/1024).The supported units are a subset of The Unified Code for - // Units of Measure (http://unitsofmeasure.org/ucum.html) standard:Basic - // units (UNIT) - // bit bit - // By byte - // s second - // min minute - // h hour - // d day - // 1 dimensionlessPrefixes (PREFIX) - // k kilo (10^3) - // M mega (10^6) - // G giga (10^9) - // T tera (10^12) - // P peta (10^15) - // E exa (10^18) - // Z zetta (10^21) - // Y yotta (10^24) - // m milli (10^-3) - // u micro (10^-6) - // n nano (10^-9) - // p pico (10^-12) - // f femto (10^-15) - // a atto (10^-18) - // z zepto (10^-21) - // y yocto (10^-24) - // Ki kibi (2^10) - // Mi mebi (2^20) - // Gi gibi (2^30) - // Ti tebi (2^40) - // Pi pebi (2^50)GrammarThe grammar also includes these connectors: - // / division or ratio (as an infix operator). For examples, - // kBy/{email} or MiBy/10ms (although you should almost never have /s - // in a metric unit; rates should always be computed at query time from - // the underlying cumulative or delta value). - // . multiplication or composition (as an infix operator). For - // examples, GBy.d or k{watt}.h.The grammar for a unit is as - // follows: - // Expression = Component { "." Component } { "/" Component } - // ; - // - // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] - // | Annotation - // | "1" - // ; - // - // Annotation = "{" NAME "}" ; - // Notes: - // Annotation is just a comment if it follows a UNIT. If the annotation - // is used alone, then the unit is equivalent to 1. For examples, - // {request}/s == 1/s, By{transmitted}/s == By/s. - // NAME is a sequence of non-blank printable ASCII characters not - // containing { or }. - // 1 represents a unitary dimensionless unit - // (https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as + // Units of Measure (https://unitsofmeasure.org/ucum.html) + // standard:Basic units (UNIT) bit bit By byte s second min minute h + // hour d day 1 dimensionlessPrefixes (PREFIX) k kilo (10^3) M mega + // (10^6) G giga (10^9) T tera (10^12) P peta (10^15) E exa (10^18) Z + // zetta (10^21) Y yotta (10^24) m milli (10^-3) u micro (10^-6) n nano + // (10^-9) p pico (10^-12) f femto (10^-15) a atto (10^-18) z zepto + // (10^-21) y yocto (10^-24) Ki kibi (2^10) Mi mebi (2^20) Gi gibi + // (2^30) Ti tebi (2^40) Pi pebi (2^50)GrammarThe grammar also includes + // these connectors: / division or ratio (as an infix operator). For + // examples, kBy/{email} or MiBy/10ms (although you should almost never + // have /s in a metric unit; rates should always be computed at query + // time from the underlying cumulative or delta value). . multiplication + // or composition (as an infix operator). For examples, GBy.d or + // k{watt}.h.The grammar for a unit is as follows: Expression = + // Component { "." Component } { "/" Component } ; Component = ( [ + // PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation + // = "{" NAME "}" ; Notes: Annotation is just a comment if it follows a + // UNIT. If the annotation is used alone, then the unit is equivalent to + // 1. For examples, {request}/s == 1/s, By{transmitted}/s == By/s. 
NAME + // is a sequence of non-blank printable ASCII characters not containing + // { or }. 1 represents a unitary dimensionless unit + // (https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such as // in 1/s. It is typically used when none of the basic units are // appropriate. For example, "new users per day" can be represented as - // 1/d or {new-users}/d (and a metric value 5 would mean "5 new users). - // Alternatively, "thousands of page views per day" would be - // represented as 1000/d or k1/d or k{page_views}/d (and a metric value - // of 5.3 would mean "5300 page views per day"). - // % represents dimensionless value of 1/100, and annotates values - // giving a percentage (so the metric values are typically in the range - // of 0..100, and a metric value 3 means "3 percent"). - // 10^2.% indicates a metric contains a ratio, typically in the range - // 0..1, that will be multiplied by 100 and displayed as a percentage - // (so a metric value 0.03 means "3 percent"). + // 1/d or {new-users}/d (and a metric value 5 would mean "5 new users"). + // Alternatively, "thousands of page views per day" would be represented + // as 1000/d or k1/d or k{page_views}/d (and a metric value of 5.3 would + // mean "5300 page views per day"). % represents a dimensionless value of + // 1/100, and annotates values giving a percentage (so the metric values + // are typically in the range of 0..100, and a metric value 3 means "3 + // percent"). 10^2.% indicates a metric contains a ratio, typically in + // the range 0..1, that will be multiplied by 100 and displayed as a + // percentage (so a metric value 0.03 means "3 percent"). Unit string `json:"unit,omitempty"` // ValueType: Whether the measurement is an integer, a floating-point @@ -3341,10 +3871,10 @@ type MetricDescriptor struct { // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Description") to include @@ -3388,8 +3918,8 @@ type MetricDescriptorMetadata struct { // they are cleared for widespread use. By Alpha, all significant design // issues are resolved and we are in the process of verifying // functionality. Alpha customers need to apply for access, agree to - // applicable terms, and have their projects whitelisted. Alpha releases - // don’t have to be feature complete, no SLAs are provided, and there + // applicable terms, and have their projects allowlisted.
Alpha releases + // don't have to be feature complete, no SLAs are provided, and there // are no technical support obligations, but they will be far enough // along that customers can actually use them in test environments or // for limited-use tests -- just like they would in normal production @@ -3402,7 +3932,7 @@ type MetricDescriptorMetadata struct { // "GA" - GA features are open to all developers and are considered // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. For more information, see the “Deprecation Policy” + // and removed. For more information, see the "Deprecation Policy" // section of our Terms of Service (https://cloud.google.com/terms/) and // the Google Cloud Platform Subject to the Deprecation Policy // (https://cloud.google.com/terms/deprecation) documentation. @@ -3416,10 +3946,10 @@ type MetricDescriptorMetadata struct { // ForceSendFields is a list of field names (e.g. "IngestDelay") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "IngestDelay") to include @@ -3438,7 +3968,7 @@ func (s *MetricDescriptorMetadata) MarshalJSON() ([]byte, error) { } // MetricRange: A MetricRange is used when each window is good when the -// value x of a single TimeSeries satisfies range.min <= x < range.max. +// value x of a single TimeSeries satisfies range.min <= x <= range.max. // The provided TimeSeries must have ValueType = INT64 or ValueType = // DOUBLE and MetricKind = GAUGE. type MetricRange struct { @@ -3453,10 +3983,10 @@ type MetricRange struct { // ForceSendFields is a list of field names (e.g. "Range") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Range") to include in API @@ -3481,12 +4011,12 @@ type MetricThreshold struct { // time series as well as how to combine the retrieved time series // together (such as when aggregating multiple streams on each resource // to a single stream for each resource or when aggregating streams - // across all members of a group of resrouces). Multiple aggregations + // across all members of a group of resources). 
Multiple aggregations // are applied in the order specified.This field is similar to the one // in the ListTimeSeries request - // (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeS - // eries/list). It is advisable to use the ListTimeSeries method when - // debugging this field. + // (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). + // It is advisable to use the ListTimeSeries method when debugging this + // field. Aggregations []*Aggregation `json:"aggregations,omitempty"` // Comparison: The comparison to apply between the time series @@ -3545,18 +4075,38 @@ type MetricThreshold struct { // alerted on quickly. Duration string `json:"duration,omitempty"` - // Filter: A filter (https://cloud.google.com/monitoring/api/v3/filters) - // that identifies which time series should be compared with the - // threshold.The filter is similar to the one that is specified in the - // ListTimeSeries request - // (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeS - // eries/list) (that call is useful to verify the time series that will - // be retrieved / processed) and must specify the metric type and - // optionally may contain restrictions on resource type, resource - // labels, and metric labels. This field may not exceed 2048 Unicode - // characters in length. + // EvaluationMissingData: A condition control that determines how + // metric-threshold conditions are evaluated when data stops arriving. + // + // Possible values: + // "EVALUATION_MISSING_DATA_UNSPECIFIED" - An unspecified evaluation + // missing data option. Equivalent to EVALUATION_MISSING_DATA_NO_OP. + // "EVALUATION_MISSING_DATA_INACTIVE" - If there is no data to + // evaluate the condition, then evaluate the condition as false. + // "EVALUATION_MISSING_DATA_ACTIVE" - If there is no data to evaluate + // the condition, then evaluate the condition as true. + // "EVALUATION_MISSING_DATA_NO_OP" - Do not evaluate the condition to + // any value if there is no data. + EvaluationMissingData string `json:"evaluationMissingData,omitempty"` + + // Filter: Required. A filter + // (https://cloud.google.com/monitoring/api/v3/filters) that identifies + // which time series should be compared with the threshold.The filter is + // similar to the one that is specified in the ListTimeSeries request + // (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) + // (that call is useful to verify the time series that will be retrieved + // / processed). The filter must specify the metric type and the + // resource type. Optionally, it can specify resource labels and metric + // labels. This field must not exceed 2048 Unicode characters in length. Filter string `json:"filter,omitempty"` + // ForecastOptions: When this field is present, the MetricThreshold + // condition forecasts whether the time series is predicted to violate + // the threshold within the forecast_horizon. When this field is not + // set, the MetricThreshold tests the current value of the timeseries + // against the threshold. + ForecastOptions *ForecastOptions `json:"forecastOptions,omitempty"` + // ThresholdValue: A value against which to compare the time series. ThresholdValue float64 `json:"thresholdValue,omitempty"` @@ -3570,10 +4120,10 @@ type MetricThreshold struct { // ForceSendFields is a list of field names (e.g. "Aggregations") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Aggregations") to include @@ -3614,11 +4164,9 @@ func (s *MetricThreshold) UnmarshalJSON(data []byte) error { // schema. For example, a particular Compute Engine VM instance could be // represented by the following object, because the // MonitoredResourceDescriptor for "gce_instance" has labels -// "instance_id" and "zone": -// { "type": "gce_instance", -// "labels": { "instance_id": "12345678901234", -// "zone": "us-central1-a" }} -// +// "project_id", "instance_id" and "zone": { "type": "gce_instance", +// "labels": { "project_id": "my-project", "instance_id": +// "12345678901234", "zone": "us-central1-a" }} type MonitoredResource struct { // Labels: Required. Values for all of the labels listed in the // associated monitored resource descriptor. For example, Compute Engine @@ -3628,15 +4176,18 @@ type MonitoredResource struct { // Type: Required. The monitored resource type. This field must match // the type field of a MonitoredResourceDescriptor object. For example, // the type of a Compute Engine VM instance is gce_instance. For a list - // of types, see Monitoring resource types and Logging resource types. + // of types, see Monitoring resource types + // (https://cloud.google.com/monitoring/api/resources) and Logging + // resource types + // (https://cloud.google.com/logging/docs/api/v2/resource-list). Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Labels") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Labels") to include in API @@ -3659,20 +4210,9 @@ func (s *MonitoredResource) MarshalJSON() ([]byte, error) { // example, the monitored resource descriptor for Google Compute Engine // VM instances has a type of "gce_instance" and specifies the use of // the labels "instance_id" and "zone" to identify particular VM -// instances.Different services can support different monitored resource -// types.The following are specific rules to service defined monitored -// resources for Monitoring and Logging: -// The type, display_name, description, labels and launch_stage fields -// are all required. -// The first label of the monitored resource descriptor must be -// resource_container. There are legacy monitored resource descritptors -// start with project_id. -// It must include a location label. 
-// Maximum of default 5 service defined monitored resource descriptors -// is allowed per service. -// Maximum of default 10 labels per monitored resource is allowed.The -// default maximum limit can be overridden. Please follow -// https://cloud.google.com/monitoring/quotas +// instances.Different APIs can support different monitored resource +// types. APIs generally provide a list method that returns the +// monitored resource descriptors used by the API. type MonitoredResourceDescriptor struct { // Description: Optional. A detailed description of the monitored // resource type that might be used in documentation. @@ -3685,13 +4225,9 @@ type MonitoredResourceDescriptor struct { DisplayName string `json:"displayName,omitempty"` // Labels: Required. A set of labels used to describe instances of this - // monitored resource type. The label key name must follow: - // Only upper and lower-case letters, digits and underscores (_) are - // allowed. - // Label name must start with a letter or digit. - // The maximum length of a label name is 100 characters.For example, an - // individual Google Cloud SQL database is identified by values for the - // labels database_id and location. + // monitored resource type. For example, an individual Google Cloud SQL + // database is identified by values for the labels "database_id" and + // "zone". Labels []*LabelDescriptor `json:"labels,omitempty"` // LaunchStage: Optional. The launch stage of the monitored resource @@ -3712,8 +4248,8 @@ type MonitoredResourceDescriptor struct { // they are cleared for widespread use. By Alpha, all significant design // issues are resolved and we are in the process of verifying // functionality. Alpha customers need to apply for access, agree to - // applicable terms, and have their projects whitelisted. Alpha releases - // don’t have to be feature complete, no SLAs are provided, and there + // applicable terms, and have their projects allowlisted. Alpha releases + // don't have to be feature complete, no SLAs are provided, and there // are no technical support obligations, but they will be far enough // along that customers can actually use them in test environments or // for limited-use tests -- just like they would in normal production @@ -3726,7 +4262,7 @@ type MonitoredResourceDescriptor struct { // "GA" - GA features are open to all developers and are considered // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. For more information, see the “Deprecation Policy” + // and removed. For more information, see the "Deprecation Policy" // section of our Terms of Service (https://cloud.google.com/terms/) and // the Google Cloud Platform Subject to the Deprecation Policy // (https://cloud.google.com/terms/deprecation) documentation. @@ -3742,16 +4278,11 @@ type MonitoredResourceDescriptor struct { Name string `json:"name,omitempty"` // Type: Required. The monitored resource type. For example, the type - // cloudsql_database represents databases in Google Cloud SQL.All - // service defined monitored resource types must be prefixed with the - // service name, in the format of {service name}/{relative resource - // name}. The relative resource name must follow: - // Only upper and lower-case letters and digits are allowed. - // It must start with upper case character and is recommended to use - // Upper Camel Case style. 
- // The maximum number of characters allowed for the - // relative_resource_name is 100.Note there are legacy service - // monitored resources not following this rule. + // "cloudsql_database" represents databases in Google Cloud SQL. For a + // list of types, see Monitoring resource types + // (https://cloud.google.com/monitoring/api/resources) and Logging + // resource types + // (https://cloud.google.com/logging/docs/api/v2/resource-list). Type string `json:"type,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3760,10 +4291,10 @@ type MonitoredResourceDescriptor struct { // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Description") to include @@ -3792,11 +4323,8 @@ type MonitoredResourceMetadata struct { // labels. System labels are a kind of metadata extracted by Google, // including "machine_image", "vpc", "subnet_id", "security_group", // "name", etc. System label values can be only strings, Boolean values, - // or a list of strings. For example: - // { "name": "my-test-instance", - // "security_group": ["a", "b", "c"], - // "spot_instance": false } - // + // or a list of strings. For example: { "name": "my-test-instance", + // "security_group": ["a", "b", "c"], "spot_instance": false } SystemLabels googleapi.RawMessage `json:"systemLabels,omitempty"` // UserLabels: Output only. A map of user-defined metadata labels. @@ -3804,10 +4332,10 @@ type MonitoredResourceMetadata struct { // ForceSendFields is a list of field names (e.g. "SystemLabels") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "SystemLabels") to include @@ -3825,6 +4353,72 @@ func (s *MonitoredResourceMetadata) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// MonitoringQueryLanguageCondition: A condition type that allows alert +// policies to be defined using Monitoring Query Language +// (https://cloud.google.com/monitoring/mql). +type MonitoringQueryLanguageCondition struct { + // Duration: The amount of time that a time series must violate the + // threshold to be considered failing. 
Currently, only values that are a + // multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are + // supported. If an invalid value is given, an error will be returned. + // When choosing a duration, it is useful to keep in mind the frequency + // of the underlying time series data (which may also be affected by any + // alignments specified in the aggregations field); a good duration is + // long enough so that a single outlier does not generate spurious + // alerts, but short enough that unhealthy states are detected and + // alerted on quickly. + Duration string `json:"duration,omitempty"` + + // EvaluationMissingData: A condition control that determines how + // metric-threshold conditions are evaluated when data stops arriving. + // + // Possible values: + // "EVALUATION_MISSING_DATA_UNSPECIFIED" - An unspecified evaluation + // missing data option. Equivalent to EVALUATION_MISSING_DATA_NO_OP. + // "EVALUATION_MISSING_DATA_INACTIVE" - If there is no data to + // evaluate the condition, then evaluate the condition as false. + // "EVALUATION_MISSING_DATA_ACTIVE" - If there is no data to evaluate + // the condition, then evaluate the condition as true. + // "EVALUATION_MISSING_DATA_NO_OP" - Do not evaluate the condition to + // any value if there is no data. + EvaluationMissingData string `json:"evaluationMissingData,omitempty"` + + // Query: Monitoring Query Language + // (https://cloud.google.com/monitoring/mql) query that outputs a + // boolean stream. + Query string `json:"query,omitempty"` + + // Trigger: The number/percent of time series for which the comparison + // must hold in order for the condition to trigger. If unspecified, then + // the condition will trigger if the comparison is true for any of the + // time series that have been identified by filter and aggregations, or + // by the ratio, if denominator_filter and denominator_aggregations are + // specified. + Trigger *Trigger `json:"trigger,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Duration") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Duration") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MonitoringQueryLanguageCondition) MarshalJSON() ([]byte, error) { + type NoMethod MonitoringQueryLanguageCondition + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // MutationRecord: Describes a change made to a configuration. type MutationRecord struct { // MutateTime: When the change occurred. @@ -3835,10 +4429,10 @@ type MutationRecord struct { // ForceSendFields is a list of field names (e.g. "MutateTime") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "MutateTime") to include in @@ -3863,6 +4457,9 @@ func (s *MutationRecord) MarshalJSON() ([]byte, error) { // authentication tokens or contact info are only partially populated on // retrieval. type NotificationChannel struct { + // CreationRecord: Record of the creation of this channel. + CreationRecord *MutationRecord `json:"creationRecord,omitempty"` + // Description: An optional human-readable description of this // notification channel. This description may provide additional // details, beyond the display name, for the channel. This may not @@ -3891,11 +4488,12 @@ type NotificationChannel struct { // NotificationChannelDescriptor corresponding to the type field. Labels map[string]string `json:"labels,omitempty"` - // Name: The full REST resource name for this channel. The format - // is: - // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] - // - // The [CHANNEL_ID] is automatically assigned by the server on creation. + // MutationRecords: Records of the modification of this channel. + MutationRecords []*MutationRecord `json:"mutationRecords,omitempty"` + + // Name: The full REST resource name for this channel. The format is: + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] The + // [CHANNEL_ID] is automatically assigned by the server on creation. Name string `json:"name,omitempty"` // Type: The type of the notification channel. This field matches the @@ -3944,20 +4542,21 @@ type NotificationChannel struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Description") to + // ForceSendFields is a list of field names (e.g. "CreationRecord") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CreationRecord") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -4003,8 +4602,8 @@ type NotificationChannelDescriptor struct { // they are cleared for widespread use. By Alpha, all significant design // issues are resolved and we are in the process of verifying // functionality. Alpha customers need to apply for access, agree to - // applicable terms, and have their projects whitelisted. Alpha releases - // don’t have to be feature complete, no SLAs are provided, and there + // applicable terms, and have their projects allowlisted. Alpha releases + // don't have to be feature complete, no SLAs are provided, and there // are no technical support obligations, but they will be far enough // along that customers can actually use them in test environments or // for limited-use tests -- just like they would in normal production @@ -4017,19 +4616,36 @@ type NotificationChannelDescriptor struct { // "GA" - GA features are open to all developers and are considered // stable and fully qualified for production use. // "DEPRECATED" - Deprecated features are scheduled to be shut down - // and removed. For more information, see the “Deprecation Policy” + // and removed. For more information, see the "Deprecation Policy" // section of our Terms of Service (https://cloud.google.com/terms/) and // the Google Cloud Platform Subject to the Deprecation Policy // (https://cloud.google.com/terms/deprecation) documentation. LaunchStage string `json:"launchStage,omitempty"` - // Name: The full REST resource name for this descriptor. The format - // is: - // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[TY - // PE] + // Name: The full REST resource name for this descriptor. The format is: + // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[TYPE] // In the above, [TYPE] is the value of the type field. Name string `json:"name,omitempty"` + // SupportedTiers: The tiers that support this notification channel; the + // project service tier must be one of the supported_tiers. + // + // Possible values: + // "SERVICE_TIER_UNSPECIFIED" - An invalid sentinel value, used to + // indicate that a tier has not been provided explicitly. + // "SERVICE_TIER_BASIC" - The Cloud Monitoring Basic tier, a free tier + // of service that provides basic features, a moderate allotment of + // logs, and access to built-in metrics. A number of features are not + // available in this tier. For more details, see the service tiers + // documentation (https://cloud.google.com/monitoring/workspaces/tiers). + // "SERVICE_TIER_PREMIUM" - The Cloud Monitoring Premium tier, a + // higher, more expensive tier of service that provides access to all + // Cloud Monitoring features, lets you use Cloud Monitoring with AWS + // accounts, and has larger allotments for logs and metrics. For more + // details, see the service tiers documentation + // (https://cloud.google.com/monitoring/workspaces/tiers). + SupportedTiers []string `json:"supportedTiers,omitempty"` + // Type: The type of notification channel, such as "email" and "sms". To // view the full list of channels, see Channel descriptors // (https://cloud.google.com/monitoring/alerts/using-channels-api#ncd). @@ -4042,10 +4658,10 @@ type NotificationChannelDescriptor struct { // ForceSendFields is a list of field names (e.g.
"Description") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Description") to include @@ -4063,6 +4679,77 @@ func (s *NotificationChannelDescriptor) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NotificationRateLimit: Control over the rate of notifications sent to +// this alert policy's notification channels. +type NotificationRateLimit struct { + // Period: Not more than one notification per period. + Period string `json:"period,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Period") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Period") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NotificationRateLimit) MarshalJSON() ([]byte, error) { + type NoMethod NotificationRateLimit + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// OperationMetadata: Contains metadata for longrunning operation for +// the edit Metrics Scope endpoints. +type OperationMetadata struct { + // CreateTime: The time when the batch request was received. + CreateTime string `json:"createTime,omitempty"` + + // State: Current state of the batch operation. + // + // Possible values: + // "STATE_UNSPECIFIED" - Invalid. + // "CREATED" - Request has been received. + // "RUNNING" - Request is actively being processed. + // "DONE" - The batch processing is done. + // "CANCELLED" - The batch processing was cancelled. + State string `json:"state,omitempty"` + + // UpdateTime: The time when the operation result was last updated. + UpdateTime string `json:"updateTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod OperationMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Option: A protocol buffer option, which can be attached to a message, // field, enumeration, etc. type Option struct { @@ -4081,10 +4768,10 @@ type Option struct { // ForceSendFields is a list of field names (e.g. "Name") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Name") to include in API @@ -4117,10 +4804,10 @@ type PerformanceThreshold struct { // ForceSendFields is a list of field names (e.g. "BasicSliPerformance") // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BasicSliPerformance") to @@ -4153,6 +4840,37 @@ func (s *PerformanceThreshold) UnmarshalJSON(data []byte) error { return nil } +// PingConfig: Information involved in sending ICMP pings alongside +// public HTTP/TCP checks. For HTTP, the pings are performed for each +// part of the redirect chain. +type PingConfig struct { + // PingsCount: Number of ICMP pings. A maximum of 3 ICMP pings is + // currently supported. + PingsCount int64 `json:"pingsCount,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PingsCount") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PingsCount") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PingConfig) MarshalJSON() ([]byte, error) { + type NoMethod PingConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Point: A single data point in a time series. type Point struct { // Interval: The time interval to which the data point applies. For @@ -4171,10 +4889,10 @@ type Point struct { // ForceSendFields is a list of field names (e.g. "Interval") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Interval") to include in @@ -4205,10 +4923,10 @@ type PointData struct { // ForceSendFields is a list of field names (e.g. "TimeInterval") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "TimeInterval") to include @@ -4238,16 +4956,17 @@ type QueryTimeSeriesRequest struct { // previous method call. PageToken string `json:"pageToken,omitempty"` - // Query: Required. The query in the monitoring query language format. - // The default time zone is in UTC. + // Query: Required. The query in the Monitoring Query Language + // (https://cloud.google.com/monitoring/mql/reference) format. The + // default time zone is in UTC. Query string `json:"query,omitempty"` // ForceSendFields is a list of field names (e.g. "PageSize") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. 
"PageSize") to include in @@ -4290,10 +5009,10 @@ type QueryTimeSeriesResponse struct { // ForceSendFields is a list of field names (e.g. "NextPageToken") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "NextPageToken") to include @@ -4321,10 +5040,10 @@ type Range struct { // ForceSendFields is a list of field names (e.g. "Max") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Max") to include in API @@ -4373,10 +5092,10 @@ type RequestBasedSli struct { // ForceSendFields is a list of field names (e.g. "DistributionCut") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DistributionCut") to @@ -4415,10 +5134,10 @@ type ResourceGroup struct { // ForceSendFields is a list of field names (e.g. "GroupId") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. 
"GroupId") to include in @@ -4436,6 +5155,48 @@ func (s *ResourceGroup) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ResponseStatusCode: A status to accept. Either a status code class +// like "2xx", or an integer status code like "200". +type ResponseStatusCode struct { + // StatusClass: A class of status codes to accept. + // + // Possible values: + // "STATUS_CLASS_UNSPECIFIED" - Default value that matches no status + // codes. + // "STATUS_CLASS_1XX" - The class of status codes between 100 and 199. + // "STATUS_CLASS_2XX" - The class of status codes between 200 and 299. + // "STATUS_CLASS_3XX" - The class of status codes between 300 and 399. + // "STATUS_CLASS_4XX" - The class of status codes between 400 and 499. + // "STATUS_CLASS_5XX" - The class of status codes between 500 and 599. + // "STATUS_CLASS_ANY" - The class of all status codes. + StatusClass string `json:"statusClass,omitempty"` + + // StatusValue: A status code to accept. + StatusValue int64 `json:"statusValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "StatusClass") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "StatusClass") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ResponseStatusCode) MarshalJSON() ([]byte, error) { + type NoMethod ResponseStatusCode + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SendNotificationChannelVerificationCodeRequest: The // SendNotificationChannelVerificationCode request. type SendNotificationChannelVerificationCodeRequest struct { @@ -4450,9 +5211,18 @@ type MService struct { // AppEngine: Type used for App Engine services. AppEngine *AppEngine `json:"appEngine,omitempty"` + // BasicService: Message that contains the service type and service + // labels of this service if it is a basic service. Documentation and + // examples here + // (https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). + BasicService *BasicService `json:"basicService,omitempty"` + // CloudEndpoints: Type used for Cloud Endpoints services. CloudEndpoints *CloudEndpoints `json:"cloudEndpoints,omitempty"` + // CloudRun: Type used for Cloud Run services. + CloudRun *CloudRun `json:"cloudRun,omitempty"` + // ClusterIstio: Type used for Istio services that live in a Kubernetes // cluster. ClusterIstio *ClusterIstio `json:"clusterIstio,omitempty"` @@ -4463,28 +5233,50 @@ type MService struct { // DisplayName: Name used for UI elements listing this Service. DisplayName string `json:"displayName,omitempty"` + // GkeNamespace: Type used for GKE Namespaces. 
+ GkeNamespace *GkeNamespace `json:"gkeNamespace,omitempty"` + + // GkeService: Type used for GKE Services (the Kubernetes concept of a + // service). + GkeService *GkeService `json:"gkeService,omitempty"` + + // GkeWorkload: Type used for GKE Workloads. + GkeWorkload *GkeWorkload `json:"gkeWorkload,omitempty"` + + // IstioCanonicalService: Type used for canonical services scoped to an + // Istio mesh. Metrics for Istio are documented here + // (https://istio.io/latest/docs/reference/config/metrics/) + IstioCanonicalService *IstioCanonicalService `json:"istioCanonicalService,omitempty"` + // MeshIstio: Type used for Istio services scoped to an Istio mesh. MeshIstio *MeshIstio `json:"meshIstio,omitempty"` - // Name: Resource name for this Service. The format - // is: + // Name: Resource name for this Service. The format is: // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] - // Name string `json:"name,omitempty"` // Telemetry: Configuration for how to query telemetry on a Service. Telemetry *Telemetry `json:"telemetry,omitempty"` + // UserLabels: Labels which have been used to annotate the service. + // Label keys must start with a letter. Label keys and values may + // contain lowercase letters, numbers, underscores, and dashes. Label + // keys and values have a maximum length of 63 characters, and must be + // less than 128 bytes in size. Up to 64 label entries may be stored. + // For labels which do not have a semantic value, the empty string may + // be supplied for the label value. + UserLabels map[string]string `json:"userLabels,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "AppEngine") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AppEngine") to include in @@ -4507,8 +5299,7 @@ func (s *MService) MarshalJSON() ([]byte, error) { // well-defined. In such cases, the SLI can be described easily by // referencing the well-known SLI and providing the needed parameters. // Alternatively, a "custom" SLI can be defined with a query to the -// underlying metric store. An SLI is defined to be good_service -// / +// underlying metric store. An SLI is defined to be good_service / // total_service over any queried time interval. The value of // performance always falls into the range 0 <= performance <= 1. A // custom SLI describes how to compute this ratio, whether this is by @@ -4530,10 +5321,10 @@ type ServiceLevelIndicator struct { // ForceSendFields is a list of field names (e.g. "BasicSli") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BasicSli") to include in @@ -4561,8 +5352,8 @@ func (s *ServiceLevelIndicator) MarshalJSON() ([]byte, error) { // successfully." type ServiceLevelObjective struct { // CalendarPeriod: A calendar period, semantically "since the start of - // the current ". At this time, only DAY, WEEK, - // FORTNIGHT, and MONTH are supported. + // the current ". At this time, only DAY, WEEK, FORTNIGHT, and MONTH are + // supported. // // Possible values: // "CALENDAR_PERIOD_UNSPECIFIED" - Undefined period, raises an error. @@ -4586,16 +5377,13 @@ type ServiceLevelObjective struct { // objective to be met. 0 < goal <= 0.999. Goal float64 `json:"goal,omitempty"` - // Name: Resource name for this ServiceLevelObjective. The format - // is: - // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevel - // Objectives/[SLO_NAME] - // + // Name: Resource name for this ServiceLevelObjective. The format is: + // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObje + // ctives/[SLO_NAME] Name string `json:"name,omitempty"` - // RollingPeriod: A rolling time period, semantically "in the past - // ". Must be an integer multiple of 1 day no larger - // than 30 days. + // RollingPeriod: A rolling time period, semantically "in the past ". + // Must be an integer multiple of 1 day no larger than 30 days. RollingPeriod string `json:"rollingPeriod,omitempty"` // ServiceLevelIndicator: The definition of good service, used to @@ -4603,16 +5391,25 @@ type ServiceLevelObjective struct { // respect to a single aspect of service quality. ServiceLevelIndicator *ServiceLevelIndicator `json:"serviceLevelIndicator,omitempty"` + // UserLabels: Labels which have been used to annotate the service-level + // objective. Label keys must start with a letter. Label keys and values + // may contain lowercase letters, numbers, underscores, and dashes. + // Label keys and values have a maximum length of 63 characters, and + // must be less than 128 bytes in size. Up to 64 label entries may be + // stored. For labels which do not have a semantic value, the empty + // string may be supplied for the label value. + UserLabels map[string]string `json:"userLabels,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "CalendarPeriod") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. 
"CalendarPeriod") to @@ -4645,6 +5442,57 @@ func (s *ServiceLevelObjective) UnmarshalJSON(data []byte) error { return nil } +// Snooze: A Snooze will prevent any alerts from being opened, and close +// any that are already open. The Snooze will work on alerts that match +// the criteria defined in the Snooze. The Snooze will be active from +// interval.start_time through interval.end_time. +type Snooze struct { + // Criteria: Required. This defines the criteria for applying the + // Snooze. See Criteria for more information. + Criteria *Criteria `json:"criteria,omitempty"` + + // DisplayName: Required. A display name for the Snooze. This can be, at + // most, 512 unicode characters. + DisplayName string `json:"displayName,omitempty"` + + // Interval: Required. The Snooze will be active from + // interval.start_time through interval.end_time. interval.start_time + // cannot be in the past. There is a 15 second clock skew to account for + // the time it takes for a request to reach the API from the UI. + Interval *TimeInterval `json:"interval,omitempty"` + + // Name: Required. The name of the Snooze. The format is: + // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] The ID of the + // Snooze will be generated by the system. + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Criteria") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Criteria") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Snooze) MarshalJSON() ([]byte, error) { + type NoMethod Snooze + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SourceContext: SourceContext represents information about the source // of a protobuf element, like the file in which it is defined. type SourceContext struct { @@ -4655,10 +5503,10 @@ type SourceContext struct { // ForceSendFields is a list of field names (e.g. "FileName") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. 
"FileName") to include in @@ -4676,18 +5524,13 @@ func (s *SourceContext) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SpanContext: The context of a span, attached to Exemplars in -// Distribution values during aggregation.It contains the name of a span -// with -// format: -// projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ -// ID] -// +// SpanContext: The context of a span. This is attached to an Exemplar +// in Distribution values during aggregation.It contains the name of a +// span with format: +// projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] type SpanContext struct { - // SpanName: The resource name of the span. The format - // is: + // SpanName: The resource name of the span. The format is: // projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID] - // // [TRACE_ID] is a unique identifier for a trace within a project; it is // a 32-character hexadecimal encoding of a 16-byte array.[SPAN_ID] is a // unique identifier for a span within a trace; it is a 16-character @@ -4696,10 +5539,10 @@ type SpanContext struct { // ForceSendFields is a list of field names (e.g. "SpanName") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "SpanName") to include in @@ -4740,10 +5583,10 @@ type Status struct { // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Code") to include in API @@ -4763,21 +5606,24 @@ func (s *Status) MarshalJSON() ([]byte, error) { // TcpCheck: Information required for a TCP Uptime check request. type TcpCheck struct { + // PingConfig: Contains information needed to add pings to a TCP check. + PingConfig *PingConfig `json:"pingConfig,omitempty"` + // Port: The TCP port on the server against which to run the check. Will // be combined with host (specified within the monitored_resource) to // construct the full URL. Required. Port int64 `json:"port,omitempty"` - // ForceSendFields is a list of field names (e.g. "Port") to + // ForceSendFields is a list of field names (e.g. "PingConfig") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Port") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "PingConfig") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -4800,10 +5646,10 @@ type Telemetry struct { // ForceSendFields is a list of field names (e.g. "ResourceName") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ResourceName") to include @@ -4821,33 +5667,39 @@ func (s *Telemetry) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TimeInterval: A closed time interval. It extends from the start time -// to the end time, and includes both: [startTime, endTime]. Valid time -// intervals depend on the MetricKind of the metric value. In no case -// can the end time be earlier than the start time. -// For GAUGE metrics, the startTime value is technically optional; if -// no value is specified, the start time defaults to the value of the -// end time, and the interval represents a single point in time. If both -// start and end times are specified, they must be identical. Such an -// interval is valid only for GAUGE metrics, which are point-in-time -// measurements. The end time of a new interval must be at least a -// millisecond after the end time of the previous interval. -// For DELTA metrics, the start time and end time must specify a -// non-zero interval, with subsequent points specifying contiguous and -// non-overlapping intervals. For DELTA metrics, the start time of the -// next interval must be at least a millisecond after the end time of -// the previous interval. -// For CUMULATIVE metrics, the start time and end time must specify a a -// non-zero interval, with subsequent points specifying the same start -// time and increasing end times, until an event resets the cumulative -// value to zero and sets a new start time for the following points. -// The new start time must be at least a millisecond after the end time -// of the previous interval. 
-// The start time of a new interval must be at least a millisecond after -// the end time of the previous interval because intervals are closed. -// If the start time of a new interval is the same as the end time of -// the previous interval, then data written at the new start time could -// overwrite data written at the previous end time. +// TimeInterval: Describes a time interval: Reads: A half-open time +// interval. It includes the end time but excludes the start time: +// (startTime, endTime]. The start time must be specified, must be +// earlier than the end time, and should be no older than the data +// retention period for the metric. Writes: A closed time interval. It +// extends from the start time to the end time, and includes both: +// [startTime, endTime]. Valid time intervals depend on the MetricKind +// (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind) +// of the metric value. The end time must not be earlier than the start +// time, and the end time must not be more than 25 hours in the past or +// more than five minutes in the future. For GAUGE metrics, the +// startTime value is technically optional; if no value is specified, +// the start time defaults to the value of the end time, and the +// interval represents a single point in time. If both start and end +// times are specified, they must be identical. Such an interval is +// valid only for GAUGE metrics, which are point-in-time measurements. +// The end time of a new interval must be at least a millisecond after +// the end time of the previous interval. For DELTA metrics, the start +// time and end time must specify a non-zero interval, with subsequent +// points specifying contiguous and non-overlapping intervals. For DELTA +// metrics, the start time of the next interval must be at least a +// millisecond after the end time of the previous interval. For +// CUMULATIVE metrics, the start time and end time must specify a +// non-zero interval, with subsequent points specifying the same start +// time and increasing end times, until an event resets the cumulative +// value to zero and sets a new start time for the following points. The +// new start time must be at least a millisecond after the end time of +// the previous interval. The start time of a new interval must be at +// least a millisecond after the end time of the previous interval +// because intervals are closed. If the start time of a new interval is +// the same as the end time of the previous interval, then data written +// at the new start time could overwrite data written at the previous +// end time. type TimeInterval struct { // EndTime: Required. The end of the time interval. EndTime string `json:"endTime,omitempty"` @@ -4859,10 +5711,10 @@ type TimeInterval struct { // ForceSendFields is a list of field names (e.g. "EndTime") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
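To make the MetricKind rules above concrete, a small sketch of the two common interval shapes; the timestamps are hypothetical and the `monitoring` alias is assumed as before:

    package main

    import monitoring "google.golang.org/api/monitoring/v3" // assumed import path

    func main() {
        // GAUGE: a point in time. StartTime may be omitted; if set, it must
        // equal EndTime.
        gauge := &monitoring.TimeInterval{
            EndTime: "2023-10-25T21:46:13Z",
        }

        // CUMULATIVE: subsequent points keep the same StartTime and advance
        // EndTime, until a reset establishes a new start time.
        cumulative := &monitoring.TimeInterval{
            StartTime: "2023-10-25T21:00:00Z",
            EndTime:   "2023-10-25T21:46:13Z",
        }
        _, _ = gauge, cumulative
    }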
ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "EndTime") to include in @@ -4925,7 +5777,9 @@ type TimeSeries struct { Points []*Point `json:"points,omitempty"` // Resource: The associated monitored resource. Custom metrics can use - // only certain monitored resource types in their time series data. + // only certain monitored resource types in their time series data. For + // more information, see Monitored resources for custom metrics + // (https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom-metric-resources). Resource *MonitoredResource `json:"resource,omitempty"` // Unit: The units in which the metric value is reported. It is only @@ -4954,10 +5808,10 @@ type TimeSeries struct { // ForceSendFields is a list of field names (e.g. "Metadata") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Metadata") to include in @@ -4990,10 +5844,10 @@ type TimeSeriesData struct { // ForceSendFields is a list of field names (e.g. "LabelValues") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "LabelValues") to include @@ -5022,10 +5876,10 @@ type TimeSeriesDescriptor struct { // ForceSendFields is a list of field names (e.g. "LabelDescriptors") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "LabelDescriptors") to @@ -5047,11 +5901,10 @@ func (s *TimeSeriesDescriptor) MarshalJSON() ([]byte, error) { // TimeSeriesRatio: A TimeSeriesRatio specifies two TimeSeries to use // for computing the good_service / total_service ratio. 
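A hedged sketch of assembling a TimeSeries for a custom metric with a single GAUGE point. The Metric, MonitoredResource, Point, and TypedValue types follow the shape of this generated package, but the metric type, labels, and IDs are invented for illustration:

    package main

    import monitoring "google.golang.org/api/monitoring/v3" // assumed import path

    func main() {
        v := 0.42
        ts := &monitoring.TimeSeries{
            Metric: &monitoring.Metric{
                Type:   "custom.googleapis.com/my_app/queue_depth", // hypothetical custom metric
                Labels: map[string]string{"shard": "a"},
            },
            // Custom metrics may only use certain monitored resource types;
            // see the Resource field comment above.
            Resource: &monitoring.MonitoredResource{
                Type: "gce_instance",
                Labels: map[string]string{
                    "project_id":  "my-project",
                    "instance_id": "1234567890",
                    "zone":        "us-central1-a",
                },
            },
            Points: []*monitoring.Point{{
                Interval: &monitoring.TimeInterval{EndTime: "2023-10-25T21:46:13Z"}, // GAUGE point
                Value:    &monitoring.TypedValue{DoubleValue: &v},
            }},
        }
        _ = ts // would be written via the projects.timeSeries.create method
    }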
The specified // TimeSeries must have ValueType = DOUBLE or ValueType = INT64 and must -// have MetricKind = -// DELTA or MetricKind = CUMULATIVE. The TimeSeriesRatio must specify -// exactly two of good, bad, and total, and the relationship -// good_service + -// bad_service = total_service will be assumed. +// have MetricKind = DELTA or MetricKind = CUMULATIVE. The +// TimeSeriesRatio must specify exactly two of good, bad, and total, and +// the relationship good_service + bad_service = total_service will be +// assumed. type TimeSeriesRatio struct { // BadServiceFilter: A monitoring filter // (https://cloud.google.com/monitoring/api/v3/filters) specifying a @@ -5064,23 +5917,23 @@ type TimeSeriesRatio struct { // GoodServiceFilter: A monitoring filter // (https://cloud.google.com/monitoring/api/v3/filters) specifying a // TimeSeries quantifying good service provided. Must have ValueType = - // DOUBLE or ValueType = INT64 and must have MetricKind = - // DELTA or MetricKind = CUMULATIVE. + // DOUBLE or ValueType = INT64 and must have MetricKind = DELTA or + // MetricKind = CUMULATIVE. GoodServiceFilter string `json:"goodServiceFilter,omitempty"` // TotalServiceFilter: A monitoring filter // (https://cloud.google.com/monitoring/api/v3/filters) specifying a // TimeSeries quantifying total demanded service. Must have ValueType = - // DOUBLE or ValueType = INT64 and must have MetricKind = - // DELTA or MetricKind = CUMULATIVE. + // DOUBLE or ValueType = INT64 and must have MetricKind = DELTA or + // MetricKind = CUMULATIVE. TotalServiceFilter string `json:"totalServiceFilter,omitempty"` // ForceSendFields is a list of field names (e.g. "BadServiceFilter") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BadServiceFilter") to @@ -5113,10 +5966,10 @@ type Trigger struct { // ForceSendFields is a list of field names (e.g. "Count") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Count") to include in API @@ -5175,10 +6028,10 @@ type Type struct { // ForceSendFields is a list of field names (e.g. "Fields") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
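The two-of-three rule above in code, as a hedged sketch; the filters are hypothetical and only illustrate the documented requirement that the filtered series be DELTA or CUMULATIVE with DOUBLE or INT64 values:

    package main

    import monitoring "google.golang.org/api/monitoring/v3" // assumed import path

    func main() {
        // Exactly two of good, bad, and total are specified; here bad_service
        // is implied as total_service - good_service.
        ratio := &monitoring.TimeSeriesRatio{
            GoodServiceFilter: `metric.type = "loadbalancing.googleapis.com/https/request_count"` +
                ` AND metric.labels.response_code_class = "200"`,
            TotalServiceFilter: `metric.type = "loadbalancing.googleapis.com/https/request_count"`,
        }
        _ = ratio
    }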
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Fields") to include in API @@ -5205,12 +6058,11 @@ type TypedValue struct { DistributionValue *Distribution `json:"distributionValue,omitempty"` // DoubleValue: A 64-bit double-precision floating-point number. Its - // magnitude is approximately ±10±300 and it - // has 16 significant digits of precision. + // magnitude is approximately ±10±300 and it has 16 significant digits + // of precision. DoubleValue *float64 `json:"doubleValue,omitempty"` - // Int64Value: A 64-bit integer. Its range is approximately - // ±9.2x1018. + // Int64Value: A 64-bit integer. Its range is approximately ±9.2x1018. Int64Value *int64 `json:"int64Value,omitempty,string"` // StringValue: A variable-length string value. @@ -5218,10 +6070,10 @@ type TypedValue struct { // ForceSendFields is a list of field names (e.g. "BoolValue") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "BoolValue") to include in @@ -5258,6 +6110,22 @@ func (s *TypedValue) UnmarshalJSON(data []byte) error { // UptimeCheckConfig: This message configures which resources and // services to monitor for availability. type UptimeCheckConfig struct { + // CheckerType: The type of checkers to use to execute the Uptime check. + // + // Possible values: + // "CHECKER_TYPE_UNSPECIFIED" - The default checker type. Currently + // converted to STATIC_IP_CHECKERS on creation, the default conversion + // behavior may change in the future. + // "STATIC_IP_CHECKERS" - STATIC_IP_CHECKERS are used for uptime + // checks that perform egress across the public internet. + // STATIC_IP_CHECKERS use the static IP addresses returned by + // ListUptimeCheckIps. + // "VPC_CHECKERS" - VPC_CHECKERS are used for uptime checks that + // perform egress using Service Directory and private network access. + // When using VPC_CHECKERS, the monitored resource type must be + // servicedirectory_service. + CheckerType string `json:"checkerType,omitempty"` + // ContentMatchers: The content that is expected to appear in the data // returned by the target server against which the check is run. 
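Note the `,string` option on Int64Value above: the 64-bit integer travels as a JSON string so it survives JSON's double-precision numbers. A minimal sketch, with the usual assumed import path:

    package main

    import (
        "fmt"
        "log"

        monitoring "google.golang.org/api/monitoring/v3" // assumed import path
    )

    func main() {
        n := int64(9200000000000000000) // near the documented ±9.2x10^18 range
        tv := &monitoring.TypedValue{Int64Value: &n}
        b, err := tv.MarshalJSON()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(string(b)) // {"int64Value":"9200000000000000000"}
    }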
// Currently, only the first entry in the content_matchers list is @@ -5267,9 +6135,9 @@ type UptimeCheckConfig struct { ContentMatchers []*ContentMatcher `json:"contentMatchers,omitempty"` // DisplayName: A human-friendly name for the Uptime check - // configuration. The display name should be unique within a Stackdriver - // Workspace in order to make it easier to identify; however, uniqueness - // is not enforced. Required. + // configuration. The display name should be unique within a Cloud + // Monitoring Workspace in order to make it easier to identify; however, + // uniqueness is not enforced. Required. DisplayName string `json:"displayName,omitempty"` // HttpCheck: Contains information needed to make an HTTP or HTTPS @@ -5291,19 +6159,19 @@ type UptimeCheckConfig struct { // MonitoredResource: The monitored resource // (https://cloud.google.com/monitoring/api/resources) associated with - // the configuration. The following monitored resource types are - // supported for Uptime checks: uptime_url, gce_instance, gae_app, - // aws_ec2_instance, aws_elb_load_balancer + // the configuration. The following monitored resource types are valid + // for this field: uptime_url, gce_instance, gae_app, aws_ec2_instance, + // aws_elb_load_balancer k8s_service servicedirectory_service + // cloud_run_revision MonitoredResource *MonitoredResource `json:"monitoredResource,omitempty"` // Name: A unique resource name for this Uptime check configuration. The // format is: - // // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] - // T - // his field should be omitted when creating the Uptime check - // configuration; on create, the resource name is assigned by the server - // and included in the response. + // [PROJECT_ID_OR_NUMBER] is the Workspace host project associated with + // the Uptime check.This field should be omitted when creating the + // Uptime check configuration; on create, the resource name is assigned + // by the server and included in the response. Name string `json:"name,omitempty"` // Period: How often, in seconds, the Uptime check is performed. @@ -5332,6 +6200,12 @@ type UptimeCheckConfig struct { // continent of South America. // "ASIA_PACIFIC" - Allows checks to run from locations within the // Asia Pacific area (ex: Singapore). + // "USA_OREGON" - Allows checks to run from locations within the + // western United States of America + // "USA_IOWA" - Allows checks to run from locations within the central + // United States of America + // "USA_VIRGINIA" - Allows checks to run from locations within the + // eastern United States of America SelectedRegions []string `json:"selectedRegions,omitempty"` // TcpCheck: Contains information needed to make a TCP check. @@ -5341,25 +6215,32 @@ type UptimeCheckConfig struct { // complete (must be between 1 and 60 seconds). Required. Timeout string `json:"timeout,omitempty"` + // UserLabels: User-supplied key/value data to be used for organizing + // and identifying the UptimeCheckConfig objects.The field can contain + // up to 64 entries. Each key and value is limited to 63 Unicode + // characters or 128 bytes, whichever is smaller. Labels and values can + // contain only lowercase letters, numerals, underscores, and dashes. + // Keys must begin with a letter. + UserLabels map[string]string `json:"userLabels,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. 
"ContentMatchers") to + // ForceSendFields is a list of field names (e.g. "CheckerType") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ContentMatchers") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "CheckerType") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -5399,14 +6280,20 @@ type UptimeCheckIp struct { // continent of South America. // "ASIA_PACIFIC" - Allows checks to run from locations within the // Asia Pacific area (ex: Singapore). + // "USA_OREGON" - Allows checks to run from locations within the + // western United States of America + // "USA_IOWA" - Allows checks to run from locations within the central + // United States of America + // "USA_VIRGINIA" - Allows checks to run from locations within the + // eastern United States of America Region string `json:"region,omitempty"` // ForceSendFields is a list of field names (e.g. "IpAddress") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "IpAddress") to include in @@ -5463,10 +6350,10 @@ type ValueDescriptor struct { // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Key") to include in API @@ -5499,10 +6386,10 @@ type VerifyNotificationChannelRequest struct { // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Code") to include in API @@ -5549,10 +6436,10 @@ type WindowsBasedSli struct { // ForceSendFields is a list of field names (e.g. "GoodBadMetricFilter") // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "GoodBadMetricFilter") to @@ -5571,7 +6458,2195 @@ func (s *WindowsBasedSli) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// method id "monitoring.projects.alertPolicies.create": +// method id "monitoring.folders.timeSeries.list": + +type FoldersTimeSeriesListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists time series that match a filter. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name), +// organization or folder on which to execute the request. The format +// is: projects/[PROJECT_ID_OR_NUMBER] organizations/[ORGANIZATION_ID] +// folders/[FOLDER_ID]. +func (r *FoldersTimeSeriesService) List(name string) *FoldersTimeSeriesListCall { + c := &FoldersTimeSeriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// AggregationAlignmentPeriod sets the optional parameter +// "aggregation.alignmentPeriod": The alignment_period specifies a time +// interval, in seconds, that is used to divide the data in all the time +// series into consistent blocks of time. This will be done before the +// per-series aligner can be applied to the data.The value must be at +// least 60 seconds. If a per-series aligner other than ALIGN_NONE is +// specified, this field is required or an error is returned. 
If no +// per-series aligner is specified, or the aligner ALIGN_NONE is +// specified, then this field is ignored.The maximum value of the +// alignment_period is 104 weeks (2 years) for charts, and 90,000 +// seconds (25 hours) for alerting policies. +func (c *FoldersTimeSeriesListCall) AggregationAlignmentPeriod(aggregationAlignmentPeriod string) *FoldersTimeSeriesListCall { + c.urlParams_.Set("aggregation.alignmentPeriod", aggregationAlignmentPeriod) + return c +} + +// AggregationCrossSeriesReducer sets the optional parameter +// "aggregation.crossSeriesReducer": The reduction operation to be used +// to combine time series into a single time series, where the value of +// each data point in the resulting series is a function of all the +// already aligned values in the input time series.Not all reducer +// operations can be applied to all time series. The valid choices +// depend on the metric_kind and the value_type of the original time +// series. Reduction can yield a time series with a different +// metric_kind or value_type than the input time series.Time series data +// must first be aligned (see per_series_aligner) in order to perform +// cross-time series reduction. If cross_series_reducer is specified, +// then per_series_aligner must be specified, and must not be +// ALIGN_NONE. An alignment_period must also be specified; otherwise, an +// error is returned. +// +// Possible values: +// +// "REDUCE_NONE" - No cross-time series reduction. The output of the +// +// Aligner is returned. +// +// "REDUCE_MEAN" - Reduce by computing the mean value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric or distribution values. The value_type of +// the output is DOUBLE. +// +// "REDUCE_MIN" - Reduce by computing the minimum value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric values. The value_type of the output is +// the same as the value_type of the input. +// +// "REDUCE_MAX" - Reduce by computing the maximum value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric values. The value_type of the output is +// the same as the value_type of the input. +// +// "REDUCE_SUM" - Reduce by computing the sum across time series for +// +// each alignment period. This reducer is valid for DELTA and GAUGE +// metrics with numeric and distribution values. The value_type of the +// output is the same as the value_type of the input. +// +// "REDUCE_STDDEV" - Reduce by computing the standard deviation across +// +// time series for each alignment period. This reducer is valid for +// DELTA and GAUGE metrics with numeric or distribution values. The +// value_type of the output is DOUBLE. +// +// "REDUCE_COUNT" - Reduce by computing the number of data points +// +// across time series for each alignment period. This reducer is valid +// for DELTA and GAUGE metrics of numeric, Boolean, distribution, and +// string value_type. The value_type of the output is INT64. +// +// "REDUCE_COUNT_TRUE" - Reduce by computing the number of True-valued +// +// data points across time series for each alignment period. This +// reducer is valid for DELTA and GAUGE metrics of Boolean value_type. +// The value_type of the output is INT64. +// +// "REDUCE_COUNT_FALSE" - Reduce by computing the number of +// +// False-valued data points across time series for each alignment +// period. 
This reducer is valid for DELTA and GAUGE metrics of Boolean +// value_type. The value_type of the output is INT64. +// +// "REDUCE_FRACTION_TRUE" - Reduce by computing the ratio of the +// +// number of True-valued data points to the total number of data points +// for each alignment period. This reducer is valid for DELTA and GAUGE +// metrics of Boolean value_type. The output value is in the range 0.0, +// 1.0 and has value_type DOUBLE. +// +// "REDUCE_PERCENTILE_99" - Reduce by computing the 99th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_95" - Reduce by computing the 95th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_50" - Reduce by computing the 50th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_05" - Reduce by computing the 5th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +func (c *FoldersTimeSeriesListCall) AggregationCrossSeriesReducer(aggregationCrossSeriesReducer string) *FoldersTimeSeriesListCall { + c.urlParams_.Set("aggregation.crossSeriesReducer", aggregationCrossSeriesReducer) + return c +} + +// AggregationGroupByFields sets the optional parameter +// "aggregation.groupByFields": The set of fields to preserve when +// cross_series_reducer is specified. The group_by_fields determine how +// the time series are partitioned into subsets prior to applying the +// aggregation operation. Each subset contains time series that have the +// same value for each of the grouping fields. Each individual time +// series is a member of exactly one subset. The cross_series_reducer is +// applied to each subset of time series. It is not possible to reduce +// across different resource types, so this field implicitly contains +// resource.type. Fields not specified in group_by_fields are aggregated +// away. If group_by_fields is not specified and all the time series +// have the same resource type, then the time series are aggregated into +// a single output time series. If cross_series_reducer is not defined, +// this field is ignored. +func (c *FoldersTimeSeriesListCall) AggregationGroupByFields(aggregationGroupByFields ...string) *FoldersTimeSeriesListCall { + c.urlParams_.SetMulti("aggregation.groupByFields", append([]string{}, aggregationGroupByFields...)) + return c +} + +// AggregationPerSeriesAligner sets the optional parameter +// "aggregation.perSeriesAligner": An Aligner describes how to bring the +// data points in a single time series into temporal alignment. 
Except +// for ALIGN_NONE, all alignments cause all the data points in an +// alignment_period to be mathematically grouped together, resulting in +// a single data point for each alignment_period with end timestamp at +// the end of the period.Not all alignment operations may be applied to +// all time series. The valid choices depend on the metric_kind and +// value_type of the original time series. Alignment can change the +// metric_kind or the value_type of the time series.Time series data +// must be aligned in order to perform cross-time series reduction. If +// cross_series_reducer is specified, then per_series_aligner must be +// specified and not equal to ALIGN_NONE and alignment_period must be +// specified; otherwise, an error is returned. +// +// Possible values: +// +// "ALIGN_NONE" - No alignment. Raw data is returned. Not valid if +// +// cross-series reduction is requested. The value_type of the result is +// the same as the value_type of the input. +// +// "ALIGN_DELTA" - Align and convert to DELTA. The output is delta = +// +// y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If +// the selected alignment period results in periods with no data, then +// the aligned value for such a period is created by interpolation. The +// value_type of the aligned result is the same as the value_type of the +// input. +// +// "ALIGN_RATE" - Align and convert to a rate. The result is computed +// +// as rate = (y1 - y0)/(t1 - t0), or "delta over time". Think of this +// aligner as providing the slope of the line that passes through the +// value at the start and at the end of the alignment_period.This +// aligner is valid for CUMULATIVE and DELTA metrics with numeric +// values. If the selected alignment period results in periods with no +// data, then the aligned value for such a period is created by +// interpolation. The output is a GAUGE metric with value_type +// DOUBLE.If, by "rate", you mean "percentage change", see the +// ALIGN_PERCENT_CHANGE aligner instead. +// +// "ALIGN_INTERPOLATE" - Align by interpolating between adjacent +// +// points around the alignment period boundary. This aligner is valid +// for GAUGE metrics with numeric values. The value_type of the aligned +// result is the same as the value_type of the input. +// +// "ALIGN_NEXT_OLDER" - Align by moving the most recent data point +// +// before the end of the alignment period to the boundary at the end of +// the alignment period. This aligner is valid for GAUGE metrics. The +// value_type of the aligned result is the same as the value_type of the +// input. +// +// "ALIGN_MIN" - Align the time series by returning the minimum value +// +// in each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// the same as the value_type of the input. +// +// "ALIGN_MAX" - Align the time series by returning the maximum value +// +// in each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// the same as the value_type of the input. +// +// "ALIGN_MEAN" - Align the time series by returning the mean value in +// +// each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// DOUBLE. +// +// "ALIGN_COUNT" - Align the time series by returning the number of +// +// values in each alignment period. 
This aligner is valid for GAUGE and +// DELTA metrics with numeric or Boolean values. The value_type of the +// aligned result is INT64. +// +// "ALIGN_SUM" - Align the time series by returning the sum of the +// +// values in each alignment period. This aligner is valid for GAUGE and +// DELTA metrics with numeric and distribution values. The value_type of +// the aligned result is the same as the value_type of the input. +// +// "ALIGN_STDDEV" - Align the time series by returning the standard +// +// deviation of the values in each alignment period. This aligner is +// valid for GAUGE and DELTA metrics with numeric values. The value_type +// of the output is DOUBLE. +// +// "ALIGN_COUNT_TRUE" - Align the time series by returning the number +// +// of True values in each alignment period. This aligner is valid for +// GAUGE metrics with Boolean values. The value_type of the output is +// INT64. +// +// "ALIGN_COUNT_FALSE" - Align the time series by returning the number +// +// of False values in each alignment period. This aligner is valid for +// GAUGE metrics with Boolean values. The value_type of the output is +// INT64. +// +// "ALIGN_FRACTION_TRUE" - Align the time series by returning the +// +// ratio of the number of True values to the total number of values in +// each alignment period. This aligner is valid for GAUGE metrics with +// Boolean values. The output value is in the range 0.0, 1.0 and has +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_99" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 99th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_95" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 95th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_50" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 50th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_05" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 5th percentile of all data +// points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENT_CHANGE" - Align and convert to a percentage change. +// +// This aligner is valid for GAUGE and DELTA metrics with numeric +// values. This alignment returns ((current - previous)/previous) * 100, +// where the value of previous is determined based on the +// alignment_period.If the values of current and previous are both 0, +// then the returned value is 0. 
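The ALIGN_PERCENT_CHANGE arithmetic described in this comment, including the zero cases it goes on to define, reduces to a small function; this sketch omits the 10-minute moving mean the service applies before the calculation:

    package main

    import (
        "fmt"
        "math"
    )

    // percentChange mirrors ((current - previous) / previous) * 100, with
    // 0/0 defined as 0 and a zero previous with nonzero current yielding +Inf.
    func percentChange(current, previous float64) float64 {
        if previous == 0 {
            if current == 0 {
                return 0
            }
            return math.Inf(1)
        }
        return (current - previous) / previous * 100
    }

    func main() {
        fmt.Println(percentChange(110, 100)) // 10
        fmt.Println(percentChange(0, 0))     // 0
        fmt.Println(percentChange(5, 0))     // +Inf
    }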
If only previous is 0, the returned +// value is infinity.A 10-minute moving mean is computed at each point +// of the alignment period prior to the above calculation to smooth the +// metric and prevent false positives from very short-lived spikes. The +// moving mean is only applicable for data whose values are >= 0. Any +// values < 0 are treated as a missing datapoint, and are ignored. While +// DELTA metrics are accepted by this alignment, special care should be +// taken that the values for the metric will always be positive. The +// output is a GAUGE metric with value_type DOUBLE. +func (c *FoldersTimeSeriesListCall) AggregationPerSeriesAligner(aggregationPerSeriesAligner string) *FoldersTimeSeriesListCall { + c.urlParams_.Set("aggregation.perSeriesAligner", aggregationPerSeriesAligner) + return c +} + +// Filter sets the optional parameter "filter": Required. A monitoring +// filter (https://cloud.google.com/monitoring/api/v3/filters) that +// specifies which time series should be returned. The filter must +// specify a single metric type, and can additionally specify metric +// labels and other information. For example: metric.type = +// "compute.googleapis.com/instance/cpu/usage_time" AND +// metric.labels.instance_name = "my-instance-name" +func (c *FoldersTimeSeriesListCall) Filter(filter string) *FoldersTimeSeriesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IntervalEndTime sets the optional parameter "interval.endTime": +// Required. The end of the time interval. +func (c *FoldersTimeSeriesListCall) IntervalEndTime(intervalEndTime string) *FoldersTimeSeriesListCall { + c.urlParams_.Set("interval.endTime", intervalEndTime) + return c +} + +// IntervalStartTime sets the optional parameter "interval.startTime": +// The beginning of the time interval. The default value for the start +// time is the end time. The start time must not be later than the end +// time. +func (c *FoldersTimeSeriesListCall) IntervalStartTime(intervalStartTime string) *FoldersTimeSeriesListCall { + c.urlParams_.Set("interval.startTime", intervalStartTime) + return c +} + +// OrderBy sets the optional parameter "orderBy": Unsupported: must be +// left blank. The points in each time series are currently returned in +// reverse time order (most recent to oldest). +func (c *FoldersTimeSeriesListCall) OrderBy(orderBy string) *FoldersTimeSeriesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": A positive number +// that is the maximum number of results to return. If page_size is +// empty or more than 100,000 results, the effective page_size is +// 100,000 results. If view is set to FULL, this is the maximum number +// of Points returned. If view is set to HEADERS, this is the maximum +// number of TimeSeries returned. +func (c *FoldersTimeSeriesListCall) PageSize(pageSize int64) *FoldersTimeSeriesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If this field is +// not empty then it must contain the nextPageToken value returned by a +// previous call to this method. Using this field causes the method to +// return additional results from the previous method call. 
+func (c *FoldersTimeSeriesListCall) PageToken(pageToken string) *FoldersTimeSeriesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// SecondaryAggregationAlignmentPeriod sets the optional parameter +// "secondaryAggregation.alignmentPeriod": The alignment_period +// specifies a time interval, in seconds, that is used to divide the +// data in all the time series into consistent blocks of time. This will +// be done before the per-series aligner can be applied to the data.The +// value must be at least 60 seconds. If a per-series aligner other than +// ALIGN_NONE is specified, this field is required or an error is +// returned. If no per-series aligner is specified, or the aligner +// ALIGN_NONE is specified, then this field is ignored.The maximum value +// of the alignment_period is 104 weeks (2 years) for charts, and 90,000 +// seconds (25 hours) for alerting policies. +func (c *FoldersTimeSeriesListCall) SecondaryAggregationAlignmentPeriod(secondaryAggregationAlignmentPeriod string) *FoldersTimeSeriesListCall { + c.urlParams_.Set("secondaryAggregation.alignmentPeriod", secondaryAggregationAlignmentPeriod) + return c +} + +// SecondaryAggregationCrossSeriesReducer sets the optional parameter +// "secondaryAggregation.crossSeriesReducer": The reduction operation to +// be used to combine time series into a single time series, where the +// value of each data point in the resulting series is a function of all +// the already aligned values in the input time series.Not all reducer +// operations can be applied to all time series. The valid choices +// depend on the metric_kind and the value_type of the original time +// series. Reduction can yield a time series with a different +// metric_kind or value_type than the input time series.Time series data +// must first be aligned (see per_series_aligner) in order to perform +// cross-time series reduction. If cross_series_reducer is specified, +// then per_series_aligner must be specified, and must not be +// ALIGN_NONE. An alignment_period must also be specified; otherwise, an +// error is returned. +// +// Possible values: +// +// "REDUCE_NONE" - No cross-time series reduction. The output of the +// +// Aligner is returned. +// +// "REDUCE_MEAN" - Reduce by computing the mean value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric or distribution values. The value_type of +// the output is DOUBLE. +// +// "REDUCE_MIN" - Reduce by computing the minimum value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric values. The value_type of the output is +// the same as the value_type of the input. +// +// "REDUCE_MAX" - Reduce by computing the maximum value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric values. The value_type of the output is +// the same as the value_type of the input. +// +// "REDUCE_SUM" - Reduce by computing the sum across time series for +// +// each alignment period. This reducer is valid for DELTA and GAUGE +// metrics with numeric and distribution values. The value_type of the +// output is the same as the value_type of the input. +// +// "REDUCE_STDDEV" - Reduce by computing the standard deviation across +// +// time series for each alignment period. This reducer is valid for +// DELTA and GAUGE metrics with numeric or distribution values. The +// value_type of the output is DOUBLE. 
+// +// "REDUCE_COUNT" - Reduce by computing the number of data points +// +// across time series for each alignment period. This reducer is valid +// for DELTA and GAUGE metrics of numeric, Boolean, distribution, and +// string value_type. The value_type of the output is INT64. +// +// "REDUCE_COUNT_TRUE" - Reduce by computing the number of True-valued +// +// data points across time series for each alignment period. This +// reducer is valid for DELTA and GAUGE metrics of Boolean value_type. +// The value_type of the output is INT64. +// +// "REDUCE_COUNT_FALSE" - Reduce by computing the number of +// +// False-valued data points across time series for each alignment +// period. This reducer is valid for DELTA and GAUGE metrics of Boolean +// value_type. The value_type of the output is INT64. +// +// "REDUCE_FRACTION_TRUE" - Reduce by computing the ratio of the +// +// number of True-valued data points to the total number of data points +// for each alignment period. This reducer is valid for DELTA and GAUGE +// metrics of Boolean value_type. The output value is in the range 0.0, +// 1.0 and has value_type DOUBLE. +// +// "REDUCE_PERCENTILE_99" - Reduce by computing the 99th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_95" - Reduce by computing the 95th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_50" - Reduce by computing the 50th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_05" - Reduce by computing the 5th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +func (c *FoldersTimeSeriesListCall) SecondaryAggregationCrossSeriesReducer(secondaryAggregationCrossSeriesReducer string) *FoldersTimeSeriesListCall { + c.urlParams_.Set("secondaryAggregation.crossSeriesReducer", secondaryAggregationCrossSeriesReducer) + return c +} + +// SecondaryAggregationGroupByFields sets the optional parameter +// "secondaryAggregation.groupByFields": The set of fields to preserve +// when cross_series_reducer is specified. The group_by_fields determine +// how the time series are partitioned into subsets prior to applying +// the aggregation operation. Each subset contains time series that have +// the same value for each of the grouping fields. Each individual time +// series is a member of exactly one subset. The cross_series_reducer is +// applied to each subset of time series. It is not possible to reduce +// across different resource types, so this field implicitly contains +// resource.type. Fields not specified in group_by_fields are aggregated +// away. 
If group_by_fields is not specified and all the time series +// have the same resource type, then the time series are aggregated into +// a single output time series. If cross_series_reducer is not defined, +// this field is ignored. +func (c *FoldersTimeSeriesListCall) SecondaryAggregationGroupByFields(secondaryAggregationGroupByFields ...string) *FoldersTimeSeriesListCall { + c.urlParams_.SetMulti("secondaryAggregation.groupByFields", append([]string{}, secondaryAggregationGroupByFields...)) + return c +} + +// SecondaryAggregationPerSeriesAligner sets the optional parameter +// "secondaryAggregation.perSeriesAligner": An Aligner describes how to +// bring the data points in a single time series into temporal +// alignment. Except for ALIGN_NONE, all alignments cause all the data +// points in an alignment_period to be mathematically grouped together, +// resulting in a single data point for each alignment_period with end +// timestamp at the end of the period.Not all alignment operations may +// be applied to all time series. The valid choices depend on the +// metric_kind and value_type of the original time series. Alignment can +// change the metric_kind or the value_type of the time series.Time +// series data must be aligned in order to perform cross-time series +// reduction. If cross_series_reducer is specified, then +// per_series_aligner must be specified and not equal to ALIGN_NONE and +// alignment_period must be specified; otherwise, an error is returned. +// +// Possible values: +// +// "ALIGN_NONE" - No alignment. Raw data is returned. Not valid if +// +// cross-series reduction is requested. The value_type of the result is +// the same as the value_type of the input. +// +// "ALIGN_DELTA" - Align and convert to DELTA. The output is delta = +// +// y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If +// the selected alignment period results in periods with no data, then +// the aligned value for such a period is created by interpolation. The +// value_type of the aligned result is the same as the value_type of the +// input. +// +// "ALIGN_RATE" - Align and convert to a rate. The result is computed +// +// as rate = (y1 - y0)/(t1 - t0), or "delta over time". Think of this +// aligner as providing the slope of the line that passes through the +// value at the start and at the end of the alignment_period.This +// aligner is valid for CUMULATIVE and DELTA metrics with numeric +// values. If the selected alignment period results in periods with no +// data, then the aligned value for such a period is created by +// interpolation. The output is a GAUGE metric with value_type +// DOUBLE.If, by "rate", you mean "percentage change", see the +// ALIGN_PERCENT_CHANGE aligner instead. +// +// "ALIGN_INTERPOLATE" - Align by interpolating between adjacent +// +// points around the alignment period boundary. This aligner is valid +// for GAUGE metrics with numeric values. The value_type of the aligned +// result is the same as the value_type of the input. +// +// "ALIGN_NEXT_OLDER" - Align by moving the most recent data point +// +// before the end of the alignment period to the boundary at the end of +// the alignment period. This aligner is valid for GAUGE metrics. The +// value_type of the aligned result is the same as the value_type of the +// input. +// +// "ALIGN_MIN" - Align the time series by returning the minimum value +// +// in each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. 
The value_type of the aligned result is +// the same as the value_type of the input. +// +// "ALIGN_MAX" - Align the time series by returning the maximum value +// +// in each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// the same as the value_type of the input. +// +// "ALIGN_MEAN" - Align the time series by returning the mean value in +// +// each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// DOUBLE. +// +// "ALIGN_COUNT" - Align the time series by returning the number of +// +// values in each alignment period. This aligner is valid for GAUGE and +// DELTA metrics with numeric or Boolean values. The value_type of the +// aligned result is INT64. +// +// "ALIGN_SUM" - Align the time series by returning the sum of the +// +// values in each alignment period. This aligner is valid for GAUGE and +// DELTA metrics with numeric and distribution values. The value_type of +// the aligned result is the same as the value_type of the input. +// +// "ALIGN_STDDEV" - Align the time series by returning the standard +// +// deviation of the values in each alignment period. This aligner is +// valid for GAUGE and DELTA metrics with numeric values. The value_type +// of the output is DOUBLE. +// +// "ALIGN_COUNT_TRUE" - Align the time series by returning the number +// +// of True values in each alignment period. This aligner is valid for +// GAUGE metrics with Boolean values. The value_type of the output is +// INT64. +// +// "ALIGN_COUNT_FALSE" - Align the time series by returning the number +// +// of False values in each alignment period. This aligner is valid for +// GAUGE metrics with Boolean values. The value_type of the output is +// INT64. +// +// "ALIGN_FRACTION_TRUE" - Align the time series by returning the +// +// ratio of the number of True values to the total number of values in +// each alignment period. This aligner is valid for GAUGE metrics with +// Boolean values. The output value is in the range 0.0, 1.0 and has +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_99" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 99th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_95" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 95th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_50" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 50th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_05" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). 
The resulting
+// data point in each alignment period is the 5th percentile of all data
+// points in the period. This aligner is valid for GAUGE and DELTA
+// metrics with distribution values. The output is a GAUGE metric with
+// value_type DOUBLE.
+//
+// "ALIGN_PERCENT_CHANGE" - Align and convert to a percentage change.
+//
+// This aligner is valid for GAUGE and DELTA metrics with numeric
+// values. This alignment returns ((current - previous)/previous) * 100,
+// where the value of previous is determined based on the
+// alignment_period.If the values of current and previous are both 0,
+// then the returned value is 0. If only previous is 0, the returned
+// value is infinity.A 10-minute moving mean is computed at each point
+// of the alignment period prior to the above calculation to smooth the
+// metric and prevent false positives from very short-lived spikes. The
+// moving mean is only applicable for data whose values are >= 0. Any
+// values < 0 are treated as a missing datapoint, and are ignored. While
+// DELTA metrics are accepted by this alignment, special care should be
+// taken that the values for the metric will always be positive. The
+// output is a GAUGE metric with value_type DOUBLE.
+func (c *FoldersTimeSeriesListCall) SecondaryAggregationPerSeriesAligner(secondaryAggregationPerSeriesAligner string) *FoldersTimeSeriesListCall {
+	c.urlParams_.Set("secondaryAggregation.perSeriesAligner", secondaryAggregationPerSeriesAligner)
+	return c
+}
+
+// View sets the optional parameter "view": Required. Specifies which
+// information is returned about the time series.
+//
+// Possible values:
+//
+// "FULL" - Returns the identity of the metric(s), the time series,
+//
+// and the time series data.
+//
+// "HEADERS" - Returns the identity of the metric and the time series
+//
+// resource, but not the time series data.
+func (c *FoldersTimeSeriesListCall) View(view string) *FoldersTimeSeriesListCall {
+	c.urlParams_.Set("view", view)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *FoldersTimeSeriesListCall) Fields(s ...googleapi.Field) *FoldersTimeSeriesListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *FoldersTimeSeriesListCall) IfNoneMatch(entityTag string) *FoldersTimeSeriesListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *FoldersTimeSeriesListCall) Context(ctx context.Context) *FoldersTimeSeriesListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
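+//
+// For example, a caller might attach an extra header before invoking Do
+// (the header name and value here are purely illustrative):
+//
+//	c.Header().Set("X-Goog-User-Project", "example-project")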
+func (c *FoldersTimeSeriesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersTimeSeriesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/timeSeries") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "monitoring.folders.timeSeries.list" call. +// Exactly one of *ListTimeSeriesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListTimeSeriesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *FoldersTimeSeriesListCall) Do(opts ...googleapi.CallOption) (*ListTimeSeriesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListTimeSeriesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists time series that match a filter.", + // "flatPath": "v3/folders/{foldersId}/timeSeries", + // "httpMethod": "GET", + // "id": "monitoring.folders.timeSeries.list", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "aggregation.alignmentPeriod": { + // "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. 
If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for alerting policies.", + // "format": "google-duration", + // "location": "query", + // "type": "string" + // }, + // "aggregation.crossSeriesReducer": { + // "description": "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.", + // "enum": [ + // "REDUCE_NONE", + // "REDUCE_MEAN", + // "REDUCE_MIN", + // "REDUCE_MAX", + // "REDUCE_SUM", + // "REDUCE_STDDEV", + // "REDUCE_COUNT", + // "REDUCE_COUNT_TRUE", + // "REDUCE_COUNT_FALSE", + // "REDUCE_FRACTION_TRUE", + // "REDUCE_PERCENTILE_99", + // "REDUCE_PERCENTILE_95", + // "REDUCE_PERCENTILE_50", + // "REDUCE_PERCENTILE_05" + // ], + // "enumDescriptions": [ + // "No cross-time series reduction. The output of the Aligner is returned.", + // "Reduce by computing the mean value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + // "Reduce by computing the minimum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the maximum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the sum across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric and distribution values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the standard deviation across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + // "Reduce by computing the number of data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of numeric, Boolean, distribution, and string value_type. The value_type of the output is INT64.", + // "Reduce by computing the number of True-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + // "Reduce by computing the number of False-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. 
The value_type of the output is INT64.", + // "Reduce by computing the ratio of the number of True-valued data points to the total number of data points for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + // "Reduce by computing the 99th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 95th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 50th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 5th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE." + // ], + // "location": "query", + // "type": "string" + // }, + // "aggregation.groupByFields": { + // "description": "The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.", + // "location": "query", + // "repeated": true, + // "type": "string" + // }, + // "aggregation.perSeriesAligner": { + // "description": "An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. 
If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.", + // "enum": [ + // "ALIGN_NONE", + // "ALIGN_DELTA", + // "ALIGN_RATE", + // "ALIGN_INTERPOLATE", + // "ALIGN_NEXT_OLDER", + // "ALIGN_MIN", + // "ALIGN_MAX", + // "ALIGN_MEAN", + // "ALIGN_COUNT", + // "ALIGN_SUM", + // "ALIGN_STDDEV", + // "ALIGN_COUNT_TRUE", + // "ALIGN_COUNT_FALSE", + // "ALIGN_FRACTION_TRUE", + // "ALIGN_PERCENTILE_99", + // "ALIGN_PERCENTILE_95", + // "ALIGN_PERCENTILE_50", + // "ALIGN_PERCENTILE_05", + // "ALIGN_PERCENT_CHANGE" + // ], + // "enumDescriptions": [ + // "No alignment. Raw data is returned. Not valid if cross-series reduction is requested. The value_type of the result is the same as the value_type of the input.", + // "Align and convert to DELTA. The output is delta = y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The value_type of the aligned result is the same as the value_type of the input.", + // "Align and convert to a rate. The result is computed as rate = (y1 - y0)/(t1 - t0), or \"delta over time\". Think of this aligner as providing the slope of the line that passes through the value at the start and at the end of the alignment_period.This aligner is valid for CUMULATIVE and DELTA metrics with numeric values. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The output is a GAUGE metric with value_type DOUBLE.If, by \"rate\", you mean \"percentage change\", see the ALIGN_PERCENT_CHANGE aligner instead.", + // "Align by interpolating between adjacent points around the alignment period boundary. This aligner is valid for GAUGE metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align by moving the most recent data point before the end of the alignment period to the boundary at the end of the alignment period. This aligner is valid for GAUGE metrics. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the minimum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the maximum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the mean value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is DOUBLE.", + // "Align the time series by returning the number of values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric or Boolean values. The value_type of the aligned result is INT64.", + // "Align the time series by returning the sum of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric and distribution values. 
The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the standard deviation of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the output is DOUBLE.", + // "Align the time series by returning the number of True values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + // "Align the time series by returning the number of False values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + // "Align the time series by returning the ratio of the number of True values to the total number of values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 99th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 95th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 50th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 5th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align and convert to a percentage change. This aligner is valid for GAUGE and DELTA metrics with numeric values. This alignment returns ((current - previous)/previous) * 100, where the value of previous is determined based on the alignment_period.If the values of current and previous are both 0, then the returned value is 0. If only previous is 0, the returned value is infinity.A 10-minute moving mean is computed at each point of the alignment period prior to the above calculation to smooth the metric and prevent false positives from very short-lived spikes. The moving mean is only applicable for data whose values are \u003e= 0. Any values \u003c 0 are treated as a missing datapoint, and are ignored. While DELTA metrics are accepted by this alignment, special care should be taken that the values for the metric will always be positive. The output is a GAUGE metric with value_type DOUBLE." + // ], + // "location": "query", + // "type": "string" + // }, + // "filter": { + // "description": "Required. A monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that specifies which time series should be returned. 
The filter must specify a single metric type, and can additionally specify metric labels and other information. For example: metric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND metric.labels.instance_name = \"my-instance-name\" ", + // "location": "query", + // "type": "string" + // }, + // "interval.endTime": { + // "description": "Required. The end of the time interval.", + // "format": "google-datetime", + // "location": "query", + // "type": "string" + // }, + // "interval.startTime": { + // "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", + // "format": "google-datetime", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name), organization or folder on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] organizations/[ORGANIZATION_ID] folders/[FOLDER_ID] ", + // "location": "path", + // "pattern": "^folders/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "orderBy": { + // "description": "Unsupported: must be left blank. The points in each time series are currently returned in reverse time order (most recent to oldest).", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "A positive number that is the maximum number of results to return. If page_size is empty or more than 100,000 results, the effective page_size is 100,000 results. If view is set to FULL, this is the maximum number of Points returned. If view is set to HEADERS, this is the maximum number of TimeSeries returned.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", + // "location": "query", + // "type": "string" + // }, + // "secondaryAggregation.alignmentPeriod": { + // "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for alerting policies.", + // "format": "google-duration", + // "location": "query", + // "type": "string" + // }, + // "secondaryAggregation.crossSeriesReducer": { + // "description": "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. 
Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.", + // "enum": [ + // "REDUCE_NONE", + // "REDUCE_MEAN", + // "REDUCE_MIN", + // "REDUCE_MAX", + // "REDUCE_SUM", + // "REDUCE_STDDEV", + // "REDUCE_COUNT", + // "REDUCE_COUNT_TRUE", + // "REDUCE_COUNT_FALSE", + // "REDUCE_FRACTION_TRUE", + // "REDUCE_PERCENTILE_99", + // "REDUCE_PERCENTILE_95", + // "REDUCE_PERCENTILE_50", + // "REDUCE_PERCENTILE_05" + // ], + // "enumDescriptions": [ + // "No cross-time series reduction. The output of the Aligner is returned.", + // "Reduce by computing the mean value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + // "Reduce by computing the minimum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the maximum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the sum across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric and distribution values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the standard deviation across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + // "Reduce by computing the number of data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of numeric, Boolean, distribution, and string value_type. The value_type of the output is INT64.", + // "Reduce by computing the number of True-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + // "Reduce by computing the number of False-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + // "Reduce by computing the ratio of the number of True-valued data points to the total number of data points for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + // "Reduce by computing the 99th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 95th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. 
The value of the output is DOUBLE.", + // "Reduce by computing the 50th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 5th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE." + // ], + // "location": "query", + // "type": "string" + // }, + // "secondaryAggregation.groupByFields": { + // "description": "The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.", + // "location": "query", + // "repeated": true, + // "type": "string" + // }, + // "secondaryAggregation.perSeriesAligner": { + // "description": "An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.", + // "enum": [ + // "ALIGN_NONE", + // "ALIGN_DELTA", + // "ALIGN_RATE", + // "ALIGN_INTERPOLATE", + // "ALIGN_NEXT_OLDER", + // "ALIGN_MIN", + // "ALIGN_MAX", + // "ALIGN_MEAN", + // "ALIGN_COUNT", + // "ALIGN_SUM", + // "ALIGN_STDDEV", + // "ALIGN_COUNT_TRUE", + // "ALIGN_COUNT_FALSE", + // "ALIGN_FRACTION_TRUE", + // "ALIGN_PERCENTILE_99", + // "ALIGN_PERCENTILE_95", + // "ALIGN_PERCENTILE_50", + // "ALIGN_PERCENTILE_05", + // "ALIGN_PERCENT_CHANGE" + // ], + // "enumDescriptions": [ + // "No alignment. Raw data is returned. Not valid if cross-series reduction is requested. The value_type of the result is the same as the value_type of the input.", + // "Align and convert to DELTA. The output is delta = y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The value_type of the aligned result is the same as the value_type of the input.", + // "Align and convert to a rate. 
The result is computed as rate = (y1 - y0)/(t1 - t0), or \"delta over time\". Think of this aligner as providing the slope of the line that passes through the value at the start and at the end of the alignment_period.This aligner is valid for CUMULATIVE and DELTA metrics with numeric values. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The output is a GAUGE metric with value_type DOUBLE.If, by \"rate\", you mean \"percentage change\", see the ALIGN_PERCENT_CHANGE aligner instead.", + // "Align by interpolating between adjacent points around the alignment period boundary. This aligner is valid for GAUGE metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align by moving the most recent data point before the end of the alignment period to the boundary at the end of the alignment period. This aligner is valid for GAUGE metrics. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the minimum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the maximum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the mean value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is DOUBLE.", + // "Align the time series by returning the number of values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric or Boolean values. The value_type of the aligned result is INT64.", + // "Align the time series by returning the sum of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric and distribution values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the standard deviation of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the output is DOUBLE.", + // "Align the time series by returning the number of True values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + // "Align the time series by returning the number of False values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + // "Align the time series by returning the ratio of the number of True values to the total number of values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 99th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. 
The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 95th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 50th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 5th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align and convert to a percentage change. This aligner is valid for GAUGE and DELTA metrics with numeric values. This alignment returns ((current - previous)/previous) * 100, where the value of previous is determined based on the alignment_period.If the values of current and previous are both 0, then the returned value is 0. If only previous is 0, the returned value is infinity.A 10-minute moving mean is computed at each point of the alignment period prior to the above calculation to smooth the metric and prevent false positives from very short-lived spikes. The moving mean is only applicable for data whose values are \u003e= 0. Any values \u003c 0 are treated as a missing datapoint, and are ignored. While DELTA metrics are accepted by this alignment, special care should be taken that the values for the metric will always be positive. The output is a GAUGE metric with value_type DOUBLE." + // ], + // "location": "query", + // "type": "string" + // }, + // "view": { + // "description": "Required. Specifies which information is returned about the time series.", + // "enum": [ + // "FULL", + // "HEADERS" + // ], + // "enumDescriptions": [ + // "Returns the identity of the metric(s), the time series, and the time series data.", + // "Returns the identity of the metric and the time series resource, but not the time series data." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v3/{+name}/timeSeries", + // "response": { + // "$ref": "ListTimeSeriesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.read" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
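+//
+// A minimal sketch of paging through all results (the service value svc,
+// the folder ID, and the timestamp are illustrative assumptions; the
+// filter follows the example in the parameter documentation above):
+//
+//	call := svc.Folders.TimeSeries.List("folders/12345").
+//		Filter(`metric.type = "compute.googleapis.com/instance/cpu/usage_time"`).
+//		IntervalEndTime("2023-10-25T00:00:00Z")
+//	err := call.Pages(ctx, func(page *ListTimeSeriesResponse) error {
+//		fmt.Printf("next page token: %q\n", page.NextPageToken)
+//		return nil // a non-nil error here stops the iteration
+//	})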
+func (c *FoldersTimeSeriesListCall) Pages(ctx context.Context, f func(*ListTimeSeriesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "monitoring.organizations.timeSeries.list": + +type OrganizationsTimeSeriesListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists time series that match a filter. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name), +// organization or folder on which to execute the request. The format +// is: projects/[PROJECT_ID_OR_NUMBER] organizations/[ORGANIZATION_ID] +// folders/[FOLDER_ID]. +func (r *OrganizationsTimeSeriesService) List(name string) *OrganizationsTimeSeriesListCall { + c := &OrganizationsTimeSeriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// AggregationAlignmentPeriod sets the optional parameter +// "aggregation.alignmentPeriod": The alignment_period specifies a time +// interval, in seconds, that is used to divide the data in all the time +// series into consistent blocks of time. This will be done before the +// per-series aligner can be applied to the data.The value must be at +// least 60 seconds. If a per-series aligner other than ALIGN_NONE is +// specified, this field is required or an error is returned. If no +// per-series aligner is specified, or the aligner ALIGN_NONE is +// specified, then this field is ignored.The maximum value of the +// alignment_period is 104 weeks (2 years) for charts, and 90,000 +// seconds (25 hours) for alerting policies. +func (c *OrganizationsTimeSeriesListCall) AggregationAlignmentPeriod(aggregationAlignmentPeriod string) *OrganizationsTimeSeriesListCall { + c.urlParams_.Set("aggregation.alignmentPeriod", aggregationAlignmentPeriod) + return c +} + +// AggregationCrossSeriesReducer sets the optional parameter +// "aggregation.crossSeriesReducer": The reduction operation to be used +// to combine time series into a single time series, where the value of +// each data point in the resulting series is a function of all the +// already aligned values in the input time series.Not all reducer +// operations can be applied to all time series. The valid choices +// depend on the metric_kind and the value_type of the original time +// series. Reduction can yield a time series with a different +// metric_kind or value_type than the input time series.Time series data +// must first be aligned (see per_series_aligner) in order to perform +// cross-time series reduction. If cross_series_reducer is specified, +// then per_series_aligner must be specified, and must not be +// ALIGN_NONE. An alignment_period must also be specified; otherwise, an +// error is returned. +// +// Possible values: +// +// "REDUCE_NONE" - No cross-time series reduction. The output of the +// +// Aligner is returned. +// +// "REDUCE_MEAN" - Reduce by computing the mean value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric or distribution values. The value_type of +// the output is DOUBLE. +// +// "REDUCE_MIN" - Reduce by computing the minimum value across time +// +// series for each alignment period. 
This reducer is valid for DELTA and +// GAUGE metrics with numeric values. The value_type of the output is +// the same as the value_type of the input. +// +// "REDUCE_MAX" - Reduce by computing the maximum value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric values. The value_type of the output is +// the same as the value_type of the input. +// +// "REDUCE_SUM" - Reduce by computing the sum across time series for +// +// each alignment period. This reducer is valid for DELTA and GAUGE +// metrics with numeric and distribution values. The value_type of the +// output is the same as the value_type of the input. +// +// "REDUCE_STDDEV" - Reduce by computing the standard deviation across +// +// time series for each alignment period. This reducer is valid for +// DELTA and GAUGE metrics with numeric or distribution values. The +// value_type of the output is DOUBLE. +// +// "REDUCE_COUNT" - Reduce by computing the number of data points +// +// across time series for each alignment period. This reducer is valid +// for DELTA and GAUGE metrics of numeric, Boolean, distribution, and +// string value_type. The value_type of the output is INT64. +// +// "REDUCE_COUNT_TRUE" - Reduce by computing the number of True-valued +// +// data points across time series for each alignment period. This +// reducer is valid for DELTA and GAUGE metrics of Boolean value_type. +// The value_type of the output is INT64. +// +// "REDUCE_COUNT_FALSE" - Reduce by computing the number of +// +// False-valued data points across time series for each alignment +// period. This reducer is valid for DELTA and GAUGE metrics of Boolean +// value_type. The value_type of the output is INT64. +// +// "REDUCE_FRACTION_TRUE" - Reduce by computing the ratio of the +// +// number of True-valued data points to the total number of data points +// for each alignment period. This reducer is valid for DELTA and GAUGE +// metrics of Boolean value_type. The output value is in the range 0.0, +// 1.0 and has value_type DOUBLE. +// +// "REDUCE_PERCENTILE_99" - Reduce by computing the 99th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_95" - Reduce by computing the 95th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_50" - Reduce by computing the 50th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_05" - Reduce by computing the 5th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. 
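+//
+// Per the constraints above, a reducer is always paired with a
+// per-series aligner and an alignment period. A minimal sketch (the
+// service value, organization ID, and grouping field are illustrative
+// assumptions):
+//
+//	c := svc.Organizations.TimeSeries.List("organizations/12345").
+//		AggregationAlignmentPeriod("300s").
+//		AggregationPerSeriesAligner("ALIGN_MEAN").
+//		AggregationCrossSeriesReducer("REDUCE_MEAN").
+//		AggregationGroupByFields("resource.labels.zone")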
+func (c *OrganizationsTimeSeriesListCall) AggregationCrossSeriesReducer(aggregationCrossSeriesReducer string) *OrganizationsTimeSeriesListCall { + c.urlParams_.Set("aggregation.crossSeriesReducer", aggregationCrossSeriesReducer) + return c +} + +// AggregationGroupByFields sets the optional parameter +// "aggregation.groupByFields": The set of fields to preserve when +// cross_series_reducer is specified. The group_by_fields determine how +// the time series are partitioned into subsets prior to applying the +// aggregation operation. Each subset contains time series that have the +// same value for each of the grouping fields. Each individual time +// series is a member of exactly one subset. The cross_series_reducer is +// applied to each subset of time series. It is not possible to reduce +// across different resource types, so this field implicitly contains +// resource.type. Fields not specified in group_by_fields are aggregated +// away. If group_by_fields is not specified and all the time series +// have the same resource type, then the time series are aggregated into +// a single output time series. If cross_series_reducer is not defined, +// this field is ignored. +func (c *OrganizationsTimeSeriesListCall) AggregationGroupByFields(aggregationGroupByFields ...string) *OrganizationsTimeSeriesListCall { + c.urlParams_.SetMulti("aggregation.groupByFields", append([]string{}, aggregationGroupByFields...)) + return c +} + +// AggregationPerSeriesAligner sets the optional parameter +// "aggregation.perSeriesAligner": An Aligner describes how to bring the +// data points in a single time series into temporal alignment. Except +// for ALIGN_NONE, all alignments cause all the data points in an +// alignment_period to be mathematically grouped together, resulting in +// a single data point for each alignment_period with end timestamp at +// the end of the period.Not all alignment operations may be applied to +// all time series. The valid choices depend on the metric_kind and +// value_type of the original time series. Alignment can change the +// metric_kind or the value_type of the time series.Time series data +// must be aligned in order to perform cross-time series reduction. If +// cross_series_reducer is specified, then per_series_aligner must be +// specified and not equal to ALIGN_NONE and alignment_period must be +// specified; otherwise, an error is returned. +// +// Possible values: +// +// "ALIGN_NONE" - No alignment. Raw data is returned. Not valid if +// +// cross-series reduction is requested. The value_type of the result is +// the same as the value_type of the input. +// +// "ALIGN_DELTA" - Align and convert to DELTA. The output is delta = +// +// y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If +// the selected alignment period results in periods with no data, then +// the aligned value for such a period is created by interpolation. The +// value_type of the aligned result is the same as the value_type of the +// input. +// +// "ALIGN_RATE" - Align and convert to a rate. The result is computed +// +// as rate = (y1 - y0)/(t1 - t0), or "delta over time". Think of this +// aligner as providing the slope of the line that passes through the +// value at the start and at the end of the alignment_period.This +// aligner is valid for CUMULATIVE and DELTA metrics with numeric +// values. If the selected alignment period results in periods with no +// data, then the aligned value for such a period is created by +// interpolation. 
The output is a GAUGE metric with value_type +// DOUBLE.If, by "rate", you mean "percentage change", see the +// ALIGN_PERCENT_CHANGE aligner instead. +// +// "ALIGN_INTERPOLATE" - Align by interpolating between adjacent +// +// points around the alignment period boundary. This aligner is valid +// for GAUGE metrics with numeric values. The value_type of the aligned +// result is the same as the value_type of the input. +// +// "ALIGN_NEXT_OLDER" - Align by moving the most recent data point +// +// before the end of the alignment period to the boundary at the end of +// the alignment period. This aligner is valid for GAUGE metrics. The +// value_type of the aligned result is the same as the value_type of the +// input. +// +// "ALIGN_MIN" - Align the time series by returning the minimum value +// +// in each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// the same as the value_type of the input. +// +// "ALIGN_MAX" - Align the time series by returning the maximum value +// +// in each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// the same as the value_type of the input. +// +// "ALIGN_MEAN" - Align the time series by returning the mean value in +// +// each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// DOUBLE. +// +// "ALIGN_COUNT" - Align the time series by returning the number of +// +// values in each alignment period. This aligner is valid for GAUGE and +// DELTA metrics with numeric or Boolean values. The value_type of the +// aligned result is INT64. +// +// "ALIGN_SUM" - Align the time series by returning the sum of the +// +// values in each alignment period. This aligner is valid for GAUGE and +// DELTA metrics with numeric and distribution values. The value_type of +// the aligned result is the same as the value_type of the input. +// +// "ALIGN_STDDEV" - Align the time series by returning the standard +// +// deviation of the values in each alignment period. This aligner is +// valid for GAUGE and DELTA metrics with numeric values. The value_type +// of the output is DOUBLE. +// +// "ALIGN_COUNT_TRUE" - Align the time series by returning the number +// +// of True values in each alignment period. This aligner is valid for +// GAUGE metrics with Boolean values. The value_type of the output is +// INT64. +// +// "ALIGN_COUNT_FALSE" - Align the time series by returning the number +// +// of False values in each alignment period. This aligner is valid for +// GAUGE metrics with Boolean values. The value_type of the output is +// INT64. +// +// "ALIGN_FRACTION_TRUE" - Align the time series by returning the +// +// ratio of the number of True values to the total number of values in +// each alignment period. This aligner is valid for GAUGE metrics with +// Boolean values. The output value is in the range 0.0, 1.0 and has +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_99" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 99th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. 
+// +// "ALIGN_PERCENTILE_95" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 95th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_50" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 50th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_05" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 5th percentile of all data +// points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENT_CHANGE" - Align and convert to a percentage change. +// +// This aligner is valid for GAUGE and DELTA metrics with numeric +// values. This alignment returns ((current - previous)/previous) * 100, +// where the value of previous is determined based on the +// alignment_period.If the values of current and previous are both 0, +// then the returned value is 0. If only previous is 0, the returned +// value is infinity.A 10-minute moving mean is computed at each point +// of the alignment period prior to the above calculation to smooth the +// metric and prevent false positives from very short-lived spikes. The +// moving mean is only applicable for data whose values are >= 0. Any +// values < 0 are treated as a missing datapoint, and are ignored. While +// DELTA metrics are accepted by this alignment, special care should be +// taken that the values for the metric will always be positive. The +// output is a GAUGE metric with value_type DOUBLE. +func (c *OrganizationsTimeSeriesListCall) AggregationPerSeriesAligner(aggregationPerSeriesAligner string) *OrganizationsTimeSeriesListCall { + c.urlParams_.Set("aggregation.perSeriesAligner", aggregationPerSeriesAligner) + return c +} + +// Filter sets the optional parameter "filter": Required. A monitoring +// filter (https://cloud.google.com/monitoring/api/v3/filters) that +// specifies which time series should be returned. The filter must +// specify a single metric type, and can additionally specify metric +// labels and other information. For example: metric.type = +// "compute.googleapis.com/instance/cpu/usage_time" AND +// metric.labels.instance_name = "my-instance-name" +func (c *OrganizationsTimeSeriesListCall) Filter(filter string) *OrganizationsTimeSeriesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IntervalEndTime sets the optional parameter "interval.endTime": +// Required. The end of the time interval. +func (c *OrganizationsTimeSeriesListCall) IntervalEndTime(intervalEndTime string) *OrganizationsTimeSeriesListCall { + c.urlParams_.Set("interval.endTime", intervalEndTime) + return c +} + +// IntervalStartTime sets the optional parameter "interval.startTime": +// The beginning of the time interval. The default value for the start +// time is the end time. The start time must not be later than the end +// time. 
+func (c *OrganizationsTimeSeriesListCall) IntervalStartTime(intervalStartTime string) *OrganizationsTimeSeriesListCall { + c.urlParams_.Set("interval.startTime", intervalStartTime) + return c +} + +// OrderBy sets the optional parameter "orderBy": Unsupported: must be +// left blank. The points in each time series are currently returned in +// reverse time order (most recent to oldest). +func (c *OrganizationsTimeSeriesListCall) OrderBy(orderBy string) *OrganizationsTimeSeriesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": A positive number +// that is the maximum number of results to return. If page_size is +// empty or more than 100,000 results, the effective page_size is +// 100,000 results. If view is set to FULL, this is the maximum number +// of Points returned. If view is set to HEADERS, this is the maximum +// number of TimeSeries returned. +func (c *OrganizationsTimeSeriesListCall) PageSize(pageSize int64) *OrganizationsTimeSeriesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If this field is +// not empty then it must contain the nextPageToken value returned by a +// previous call to this method. Using this field causes the method to +// return additional results from the previous method call. +func (c *OrganizationsTimeSeriesListCall) PageToken(pageToken string) *OrganizationsTimeSeriesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// SecondaryAggregationAlignmentPeriod sets the optional parameter +// "secondaryAggregation.alignmentPeriod": The alignment_period +// specifies a time interval, in seconds, that is used to divide the +// data in all the time series into consistent blocks of time. This will +// be done before the per-series aligner can be applied to the data.The +// value must be at least 60 seconds. If a per-series aligner other than +// ALIGN_NONE is specified, this field is required or an error is +// returned. If no per-series aligner is specified, or the aligner +// ALIGN_NONE is specified, then this field is ignored.The maximum value +// of the alignment_period is 104 weeks (2 years) for charts, and 90,000 +// seconds (25 hours) for alerting policies. +func (c *OrganizationsTimeSeriesListCall) SecondaryAggregationAlignmentPeriod(secondaryAggregationAlignmentPeriod string) *OrganizationsTimeSeriesListCall { + c.urlParams_.Set("secondaryAggregation.alignmentPeriod", secondaryAggregationAlignmentPeriod) + return c +} + +// SecondaryAggregationCrossSeriesReducer sets the optional parameter +// "secondaryAggregation.crossSeriesReducer": The reduction operation to +// be used to combine time series into a single time series, where the +// value of each data point in the resulting series is a function of all +// the already aligned values in the input time series.Not all reducer +// operations can be applied to all time series. The valid choices +// depend on the metric_kind and the value_type of the original time +// series. Reduction can yield a time series with a different +// metric_kind or value_type than the input time series.Time series data +// must first be aligned (see per_series_aligner) in order to perform +// cross-time series reduction. If cross_series_reducer is specified, +// then per_series_aligner must be specified, and must not be +// ALIGN_NONE. An alignment_period must also be specified; otherwise, an +// error is returned. 
+// +// Possible values: +// +// "REDUCE_NONE" - No cross-time series reduction. The output of the +// +// Aligner is returned. +// +// "REDUCE_MEAN" - Reduce by computing the mean value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric or distribution values. The value_type of +// the output is DOUBLE. +// +// "REDUCE_MIN" - Reduce by computing the minimum value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric values. The value_type of the output is +// the same as the value_type of the input. +// +// "REDUCE_MAX" - Reduce by computing the maximum value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric values. The value_type of the output is +// the same as the value_type of the input. +// +// "REDUCE_SUM" - Reduce by computing the sum across time series for +// +// each alignment period. This reducer is valid for DELTA and GAUGE +// metrics with numeric and distribution values. The value_type of the +// output is the same as the value_type of the input. +// +// "REDUCE_STDDEV" - Reduce by computing the standard deviation across +// +// time series for each alignment period. This reducer is valid for +// DELTA and GAUGE metrics with numeric or distribution values. The +// value_type of the output is DOUBLE. +// +// "REDUCE_COUNT" - Reduce by computing the number of data points +// +// across time series for each alignment period. This reducer is valid +// for DELTA and GAUGE metrics of numeric, Boolean, distribution, and +// string value_type. The value_type of the output is INT64. +// +// "REDUCE_COUNT_TRUE" - Reduce by computing the number of True-valued +// +// data points across time series for each alignment period. This +// reducer is valid for DELTA and GAUGE metrics of Boolean value_type. +// The value_type of the output is INT64. +// +// "REDUCE_COUNT_FALSE" - Reduce by computing the number of +// +// False-valued data points across time series for each alignment +// period. This reducer is valid for DELTA and GAUGE metrics of Boolean +// value_type. The value_type of the output is INT64. +// +// "REDUCE_FRACTION_TRUE" - Reduce by computing the ratio of the +// +// number of True-valued data points to the total number of data points +// for each alignment period. This reducer is valid for DELTA and GAUGE +// metrics of Boolean value_type. The output value is in the range 0.0, +// 1.0 and has value_type DOUBLE. +// +// "REDUCE_PERCENTILE_99" - Reduce by computing the 99th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_95" - Reduce by computing the 95th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_50" - Reduce by computing the 50th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. 
+// +// "REDUCE_PERCENTILE_05" - Reduce by computing the 5th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +func (c *OrganizationsTimeSeriesListCall) SecondaryAggregationCrossSeriesReducer(secondaryAggregationCrossSeriesReducer string) *OrganizationsTimeSeriesListCall { + c.urlParams_.Set("secondaryAggregation.crossSeriesReducer", secondaryAggregationCrossSeriesReducer) + return c +} + +// SecondaryAggregationGroupByFields sets the optional parameter +// "secondaryAggregation.groupByFields": The set of fields to preserve +// when cross_series_reducer is specified. The group_by_fields determine +// how the time series are partitioned into subsets prior to applying +// the aggregation operation. Each subset contains time series that have +// the same value for each of the grouping fields. Each individual time +// series is a member of exactly one subset. The cross_series_reducer is +// applied to each subset of time series. It is not possible to reduce +// across different resource types, so this field implicitly contains +// resource.type. Fields not specified in group_by_fields are aggregated +// away. If group_by_fields is not specified and all the time series +// have the same resource type, then the time series are aggregated into +// a single output time series. If cross_series_reducer is not defined, +// this field is ignored. +func (c *OrganizationsTimeSeriesListCall) SecondaryAggregationGroupByFields(secondaryAggregationGroupByFields ...string) *OrganizationsTimeSeriesListCall { + c.urlParams_.SetMulti("secondaryAggregation.groupByFields", append([]string{}, secondaryAggregationGroupByFields...)) + return c +} + +// SecondaryAggregationPerSeriesAligner sets the optional parameter +// "secondaryAggregation.perSeriesAligner": An Aligner describes how to +// bring the data points in a single time series into temporal +// alignment. Except for ALIGN_NONE, all alignments cause all the data +// points in an alignment_period to be mathematically grouped together, +// resulting in a single data point for each alignment_period with end +// timestamp at the end of the period.Not all alignment operations may +// be applied to all time series. The valid choices depend on the +// metric_kind and value_type of the original time series. Alignment can +// change the metric_kind or the value_type of the time series.Time +// series data must be aligned in order to perform cross-time series +// reduction. If cross_series_reducer is specified, then +// per_series_aligner must be specified and not equal to ALIGN_NONE and +// alignment_period must be specified; otherwise, an error is returned. +// +// Possible values: +// +// "ALIGN_NONE" - No alignment. Raw data is returned. Not valid if +// +// cross-series reduction is requested. The value_type of the result is +// the same as the value_type of the input. +// +// "ALIGN_DELTA" - Align and convert to DELTA. The output is delta = +// +// y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If +// the selected alignment period results in periods with no data, then +// the aligned value for such a period is created by interpolation. The +// value_type of the aligned result is the same as the value_type of the +// input. +// +// "ALIGN_RATE" - Align and convert to a rate. 
The result is computed +// +// as rate = (y1 - y0)/(t1 - t0), or "delta over time". Think of this +// aligner as providing the slope of the line that passes through the +// value at the start and at the end of the alignment_period.This +// aligner is valid for CUMULATIVE and DELTA metrics with numeric +// values. If the selected alignment period results in periods with no +// data, then the aligned value for such a period is created by +// interpolation. The output is a GAUGE metric with value_type +// DOUBLE.If, by "rate", you mean "percentage change", see the +// ALIGN_PERCENT_CHANGE aligner instead. +// +// "ALIGN_INTERPOLATE" - Align by interpolating between adjacent +// +// points around the alignment period boundary. This aligner is valid +// for GAUGE metrics with numeric values. The value_type of the aligned +// result is the same as the value_type of the input. +// +// "ALIGN_NEXT_OLDER" - Align by moving the most recent data point +// +// before the end of the alignment period to the boundary at the end of +// the alignment period. This aligner is valid for GAUGE metrics. The +// value_type of the aligned result is the same as the value_type of the +// input. +// +// "ALIGN_MIN" - Align the time series by returning the minimum value +// +// in each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// the same as the value_type of the input. +// +// "ALIGN_MAX" - Align the time series by returning the maximum value +// +// in each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// the same as the value_type of the input. +// +// "ALIGN_MEAN" - Align the time series by returning the mean value in +// +// each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// DOUBLE. +// +// "ALIGN_COUNT" - Align the time series by returning the number of +// +// values in each alignment period. This aligner is valid for GAUGE and +// DELTA metrics with numeric or Boolean values. The value_type of the +// aligned result is INT64. +// +// "ALIGN_SUM" - Align the time series by returning the sum of the +// +// values in each alignment period. This aligner is valid for GAUGE and +// DELTA metrics with numeric and distribution values. The value_type of +// the aligned result is the same as the value_type of the input. +// +// "ALIGN_STDDEV" - Align the time series by returning the standard +// +// deviation of the values in each alignment period. This aligner is +// valid for GAUGE and DELTA metrics with numeric values. The value_type +// of the output is DOUBLE. +// +// "ALIGN_COUNT_TRUE" - Align the time series by returning the number +// +// of True values in each alignment period. This aligner is valid for +// GAUGE metrics with Boolean values. The value_type of the output is +// INT64. +// +// "ALIGN_COUNT_FALSE" - Align the time series by returning the number +// +// of False values in each alignment period. This aligner is valid for +// GAUGE metrics with Boolean values. The value_type of the output is +// INT64. +// +// "ALIGN_FRACTION_TRUE" - Align the time series by returning the +// +// ratio of the number of True values to the total number of values in +// each alignment period. This aligner is valid for GAUGE metrics with +// Boolean values. The output value is in the range 0.0, 1.0 and has +// value_type DOUBLE. 
+// +// "ALIGN_PERCENTILE_99" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 99th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_95" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 95th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_50" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 50th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_05" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 5th percentile of all data +// points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENT_CHANGE" - Align and convert to a percentage change. +// +// This aligner is valid for GAUGE and DELTA metrics with numeric +// values. This alignment returns ((current - previous)/previous) * 100, +// where the value of previous is determined based on the +// alignment_period.If the values of current and previous are both 0, +// then the returned value is 0. If only previous is 0, the returned +// value is infinity.A 10-minute moving mean is computed at each point +// of the alignment period prior to the above calculation to smooth the +// metric and prevent false positives from very short-lived spikes. The +// moving mean is only applicable for data whose values are >= 0. Any +// values < 0 are treated as a missing datapoint, and are ignored. While +// DELTA metrics are accepted by this alignment, special care should be +// taken that the values for the metric will always be positive. The +// output is a GAUGE metric with value_type DOUBLE. +func (c *OrganizationsTimeSeriesListCall) SecondaryAggregationPerSeriesAligner(secondaryAggregationPerSeriesAligner string) *OrganizationsTimeSeriesListCall { + c.urlParams_.Set("secondaryAggregation.perSeriesAligner", secondaryAggregationPerSeriesAligner) + return c +} + +// View sets the optional parameter "view": Required. Specifies which +// information is returned about the time series. +// +// Possible values: +// +// "FULL" - Returns the identity of the metric(s), the time series, +// +// and the time series data. +// +// "HEADERS" - Returns the identity of the metric and the time series +// +// resource, but not the time series data. +func (c *OrganizationsTimeSeriesListCall) View(view string) *OrganizationsTimeSeriesListCall { + c.urlParams_.Set("view", view) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *OrganizationsTimeSeriesListCall) Fields(s ...googleapi.Field) *OrganizationsTimeSeriesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *OrganizationsTimeSeriesListCall) IfNoneMatch(entityTag string) *OrganizationsTimeSeriesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsTimeSeriesListCall) Context(ctx context.Context) *OrganizationsTimeSeriesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsTimeSeriesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsTimeSeriesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/timeSeries") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "monitoring.organizations.timeSeries.list" call. +// Exactly one of *ListTimeSeriesResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListTimeSeriesResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OrganizationsTimeSeriesListCall) Do(opts ...googleapi.CallOption) (*ListTimeSeriesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListTimeSeriesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists time series that match a filter.", + // "flatPath": "v3/organizations/{organizationsId}/timeSeries", + // "httpMethod": "GET", + // "id": "monitoring.organizations.timeSeries.list", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "aggregation.alignmentPeriod": { + // "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for alerting policies.", + // "format": "google-duration", + // "location": "query", + // "type": "string" + // }, + // "aggregation.crossSeriesReducer": { + // "description": "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.", + // "enum": [ + // "REDUCE_NONE", + // "REDUCE_MEAN", + // "REDUCE_MIN", + // "REDUCE_MAX", + // "REDUCE_SUM", + // "REDUCE_STDDEV", + // "REDUCE_COUNT", + // "REDUCE_COUNT_TRUE", + // "REDUCE_COUNT_FALSE", + // "REDUCE_FRACTION_TRUE", + // "REDUCE_PERCENTILE_99", + // "REDUCE_PERCENTILE_95", + // "REDUCE_PERCENTILE_50", + // "REDUCE_PERCENTILE_05" + // ], + // "enumDescriptions": [ + // "No cross-time series reduction. The output of the Aligner is returned.", + // "Reduce by computing the mean value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + // "Reduce by computing the minimum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. 
The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the maximum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the sum across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric and distribution values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the standard deviation across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + // "Reduce by computing the number of data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of numeric, Boolean, distribution, and string value_type. The value_type of the output is INT64.", + // "Reduce by computing the number of True-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + // "Reduce by computing the number of False-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + // "Reduce by computing the ratio of the number of True-valued data points to the total number of data points for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + // "Reduce by computing the 99th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 95th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 50th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 5th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE." + // ], + // "location": "query", + // "type": "string" + // }, + // "aggregation.groupByFields": { + // "description": "The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. 
If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.", + // "location": "query", + // "repeated": true, + // "type": "string" + // }, + // "aggregation.perSeriesAligner": { + // "description": "An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.", + // "enum": [ + // "ALIGN_NONE", + // "ALIGN_DELTA", + // "ALIGN_RATE", + // "ALIGN_INTERPOLATE", + // "ALIGN_NEXT_OLDER", + // "ALIGN_MIN", + // "ALIGN_MAX", + // "ALIGN_MEAN", + // "ALIGN_COUNT", + // "ALIGN_SUM", + // "ALIGN_STDDEV", + // "ALIGN_COUNT_TRUE", + // "ALIGN_COUNT_FALSE", + // "ALIGN_FRACTION_TRUE", + // "ALIGN_PERCENTILE_99", + // "ALIGN_PERCENTILE_95", + // "ALIGN_PERCENTILE_50", + // "ALIGN_PERCENTILE_05", + // "ALIGN_PERCENT_CHANGE" + // ], + // "enumDescriptions": [ + // "No alignment. Raw data is returned. Not valid if cross-series reduction is requested. The value_type of the result is the same as the value_type of the input.", + // "Align and convert to DELTA. The output is delta = y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The value_type of the aligned result is the same as the value_type of the input.", + // "Align and convert to a rate. The result is computed as rate = (y1 - y0)/(t1 - t0), or \"delta over time\". Think of this aligner as providing the slope of the line that passes through the value at the start and at the end of the alignment_period.This aligner is valid for CUMULATIVE and DELTA metrics with numeric values. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The output is a GAUGE metric with value_type DOUBLE.If, by \"rate\", you mean \"percentage change\", see the ALIGN_PERCENT_CHANGE aligner instead.", + // "Align by interpolating between adjacent points around the alignment period boundary. This aligner is valid for GAUGE metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align by moving the most recent data point before the end of the alignment period to the boundary at the end of the alignment period. This aligner is valid for GAUGE metrics. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the minimum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. 
The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the maximum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the mean value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is DOUBLE.", + // "Align the time series by returning the number of values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric or Boolean values. The value_type of the aligned result is INT64.", + // "Align the time series by returning the sum of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric and distribution values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the standard deviation of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the output is DOUBLE.", + // "Align the time series by returning the number of True values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + // "Align the time series by returning the number of False values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + // "Align the time series by returning the ratio of the number of True values to the total number of values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 99th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 95th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 50th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 5th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align and convert to a percentage change. This aligner is valid for GAUGE and DELTA metrics with numeric values. 
This alignment returns ((current - previous)/previous) * 100, where the value of previous is determined based on the alignment_period.If the values of current and previous are both 0, then the returned value is 0. If only previous is 0, the returned value is infinity.A 10-minute moving mean is computed at each point of the alignment period prior to the above calculation to smooth the metric and prevent false positives from very short-lived spikes. The moving mean is only applicable for data whose values are \u003e= 0. Any values \u003c 0 are treated as a missing datapoint, and are ignored. While DELTA metrics are accepted by this alignment, special care should be taken that the values for the metric will always be positive. The output is a GAUGE metric with value_type DOUBLE." + // ], + // "location": "query", + // "type": "string" + // }, + // "filter": { + // "description": "Required. A monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that specifies which time series should be returned. The filter must specify a single metric type, and can additionally specify metric labels and other information. For example: metric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND metric.labels.instance_name = \"my-instance-name\" ", + // "location": "query", + // "type": "string" + // }, + // "interval.endTime": { + // "description": "Required. The end of the time interval.", + // "format": "google-datetime", + // "location": "query", + // "type": "string" + // }, + // "interval.startTime": { + // "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", + // "format": "google-datetime", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name), organization or folder on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] organizations/[ORGANIZATION_ID] folders/[FOLDER_ID] ", + // "location": "path", + // "pattern": "^organizations/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "orderBy": { + // "description": "Unsupported: must be left blank. The points in each time series are currently returned in reverse time order (most recent to oldest).", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "A positive number that is the maximum number of results to return. If page_size is empty or more than 100,000 results, the effective page_size is 100,000 results. If view is set to FULL, this is the maximum number of Points returned. If view is set to HEADERS, this is the maximum number of TimeSeries returned.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", + // "location": "query", + // "type": "string" + // }, + // "secondaryAggregation.alignmentPeriod": { + // "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. 
If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for alerting policies.", + // "format": "google-duration", + // "location": "query", + // "type": "string" + // }, + // "secondaryAggregation.crossSeriesReducer": { + // "description": "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.", + // "enum": [ + // "REDUCE_NONE", + // "REDUCE_MEAN", + // "REDUCE_MIN", + // "REDUCE_MAX", + // "REDUCE_SUM", + // "REDUCE_STDDEV", + // "REDUCE_COUNT", + // "REDUCE_COUNT_TRUE", + // "REDUCE_COUNT_FALSE", + // "REDUCE_FRACTION_TRUE", + // "REDUCE_PERCENTILE_99", + // "REDUCE_PERCENTILE_95", + // "REDUCE_PERCENTILE_50", + // "REDUCE_PERCENTILE_05" + // ], + // "enumDescriptions": [ + // "No cross-time series reduction. The output of the Aligner is returned.", + // "Reduce by computing the mean value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + // "Reduce by computing the minimum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the maximum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the sum across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric and distribution values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the standard deviation across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + // "Reduce by computing the number of data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of numeric, Boolean, distribution, and string value_type. The value_type of the output is INT64.", + // "Reduce by computing the number of True-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + // "Reduce by computing the number of False-valued data points across time series for each alignment period. 
This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + // "Reduce by computing the ratio of the number of True-valued data points to the total number of data points for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + // "Reduce by computing the 99th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 95th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 50th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 5th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE." + // ], + // "location": "query", + // "type": "string" + // }, + // "secondaryAggregation.groupByFields": { + // "description": "The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.", + // "location": "query", + // "repeated": true, + // "type": "string" + // }, + // "secondaryAggregation.perSeriesAligner": { + // "description": "An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. 
If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.", + // "enum": [ + // "ALIGN_NONE", + // "ALIGN_DELTA", + // "ALIGN_RATE", + // "ALIGN_INTERPOLATE", + // "ALIGN_NEXT_OLDER", + // "ALIGN_MIN", + // "ALIGN_MAX", + // "ALIGN_MEAN", + // "ALIGN_COUNT", + // "ALIGN_SUM", + // "ALIGN_STDDEV", + // "ALIGN_COUNT_TRUE", + // "ALIGN_COUNT_FALSE", + // "ALIGN_FRACTION_TRUE", + // "ALIGN_PERCENTILE_99", + // "ALIGN_PERCENTILE_95", + // "ALIGN_PERCENTILE_50", + // "ALIGN_PERCENTILE_05", + // "ALIGN_PERCENT_CHANGE" + // ], + // "enumDescriptions": [ + // "No alignment. Raw data is returned. Not valid if cross-series reduction is requested. The value_type of the result is the same as the value_type of the input.", + // "Align and convert to DELTA. The output is delta = y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The value_type of the aligned result is the same as the value_type of the input.", + // "Align and convert to a rate. The result is computed as rate = (y1 - y0)/(t1 - t0), or \"delta over time\". Think of this aligner as providing the slope of the line that passes through the value at the start and at the end of the alignment_period.This aligner is valid for CUMULATIVE and DELTA metrics with numeric values. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The output is a GAUGE metric with value_type DOUBLE.If, by \"rate\", you mean \"percentage change\", see the ALIGN_PERCENT_CHANGE aligner instead.", + // "Align by interpolating between adjacent points around the alignment period boundary. This aligner is valid for GAUGE metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align by moving the most recent data point before the end of the alignment period to the boundary at the end of the alignment period. This aligner is valid for GAUGE metrics. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the minimum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the maximum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the mean value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is DOUBLE.", + // "Align the time series by returning the number of values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric or Boolean values. The value_type of the aligned result is INT64.", + // "Align the time series by returning the sum of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric and distribution values. 
The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the standard deviation of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the output is DOUBLE.", + // "Align the time series by returning the number of True values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + // "Align the time series by returning the number of False values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + // "Align the time series by returning the ratio of the number of True values to the total number of values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 99th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 95th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 50th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 5th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align and convert to a percentage change. This aligner is valid for GAUGE and DELTA metrics with numeric values. This alignment returns ((current - previous)/previous) * 100, where the value of previous is determined based on the alignment_period.If the values of current and previous are both 0, then the returned value is 0. If only previous is 0, the returned value is infinity.A 10-minute moving mean is computed at each point of the alignment period prior to the above calculation to smooth the metric and prevent false positives from very short-lived spikes. The moving mean is only applicable for data whose values are \u003e= 0. Any values \u003c 0 are treated as a missing datapoint, and are ignored. While DELTA metrics are accepted by this alignment, special care should be taken that the values for the metric will always be positive. The output is a GAUGE metric with value_type DOUBLE." + // ], + // "location": "query", + // "type": "string" + // }, + // "view": { + // "description": "Required. 
Specifies which information is returned about the time series.", + // "enum": [ + // "FULL", + // "HEADERS" + // ], + // "enumDescriptions": [ + // "Returns the identity of the metric(s), the time series, and the time series data.", + // "Returns the identity of the metric and the time series resource, but not the time series data." + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v3/{+name}/timeSeries", + // "response": { + // "$ref": "ListTimeSeriesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.read" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *OrganizationsTimeSeriesListCall) Pages(ctx context.Context, f func(*ListTimeSeriesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "monitoring.projects.alertPolicies.create": type ProjectsAlertPoliciesCreateCall struct { s *Service @@ -5582,18 +8657,884 @@ type ProjectsAlertPoliciesCreateCall struct { header_ http.Header } -// Create: Creates a new alerting policy. -func (r *ProjectsAlertPoliciesService) Create(name string, alertpolicy *AlertPolicy) *ProjectsAlertPoliciesCreateCall { - c := &ProjectsAlertPoliciesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Create: Creates a new alerting policy.Design your application to +// single-thread API calls that modify the state of alerting policies in +// a single project. This includes calls to CreateAlertPolicy, +// DeleteAlertPolicy and UpdateAlertPolicy. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) in which +// to create the alerting policy. The format is: +// projects/[PROJECT_ID_OR_NUMBER] Note that this field names the +// parent container in which the alerting policy will be written, not +// the name of the created policy. |name| must be a host project of a +// Metrics Scope, otherwise INVALID_ARGUMENT error will return. The +// alerting policy that is returned will have a name that contains a +// normalized representation of this name as a prefix but adds a +// suffix of the form /alertPolicies/[ALERT_POLICY_ID], identifying +// the policy in the container. +func (r *ProjectsAlertPoliciesService) Create(name string, alertpolicy *AlertPolicy) *ProjectsAlertPoliciesCreateCall { + c := &ProjectsAlertPoliciesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.alertpolicy = alertpolicy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAlertPoliciesCreateCall) Fields(s ...googleapi.Field) *ProjectsAlertPoliciesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
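The Pages helper defined above handles the pageToken/nextPageToken bookkeeping, so callers only supply a per-page function; returning a non-nil error from that function halts the iteration. A usage sketch, assuming the call and a context.Context from the earlier sketch (the printing logic is illustrative):

err = call.Pages(ctx, func(page *monitoring.ListTimeSeriesResponse) error {
	for _, ts := range page.TimeSeries {
		fmt.Printf("%s: %d points\n", ts.Metric.Type, len(ts.Points))
	}
	return nil
})

For single-shot calls, the Do documentation above applies: when IfNoneMatch was set, googleapi.IsNotModified distinguishes a 304 Not Modified response from other failures.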
+func (c *ProjectsAlertPoliciesCreateCall) Context(ctx context.Context) *ProjectsAlertPoliciesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAlertPoliciesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAlertPoliciesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.alertpolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/alertPolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "monitoring.projects.alertPolicies.create" call. +// Exactly one of *AlertPolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AlertPolicy.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsAlertPoliciesCreateCall) Do(opts ...googleapi.CallOption) (*AlertPolicy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AlertPolicy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new alerting policy.Design your application to single-thread API calls that modify the state of alerting policies in a single project. This includes calls to CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.", + // "flatPath": "v3/projects/{projectsId}/alertPolicies", + // "httpMethod": "POST", + // "id": "monitoring.projects.alertPolicies.create", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) in which to create the alerting policy. The format is: projects/[PROJECT_ID_OR_NUMBER] Note that this field names the parent container in which the alerting policy will be written, not the name of the created policy. |name| must be a host project of a Metrics Scope, otherwise INVALID_ARGUMENT error will return. 
The alerting policy that is returned will have a name that contains a normalized representation of this name as a prefix but adds a suffix of the form /alertPolicies/[ALERT_POLICY_ID], identifying the policy in the container.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v3/{+name}/alertPolicies", + // "request": { + // "$ref": "AlertPolicy" + // }, + // "response": { + // "$ref": "AlertPolicy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring" + // ] + // } + +} + +// method id "monitoring.projects.alertPolicies.delete": + +type ProjectsAlertPoliciesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an alerting policy.Design your application to +// single-thread API calls that modify the state of alerting policies in +// a single project. This includes calls to CreateAlertPolicy, +// DeleteAlertPolicy and UpdateAlertPolicy. +// +// - name: The alerting policy to delete. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] For +// more information, see AlertPolicy. +func (r *ProjectsAlertPoliciesService) Delete(name string) *ProjectsAlertPoliciesDeleteCall { + c := &ProjectsAlertPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAlertPoliciesDeleteCall) Fields(s ...googleapi.Field) *ProjectsAlertPoliciesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAlertPoliciesDeleteCall) Context(ctx context.Context) *ProjectsAlertPoliciesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAlertPoliciesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAlertPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "monitoring.projects.alertPolicies.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsAlertPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes an alerting policy.Design your application to single-thread API calls that modify the state of alerting policies in a single project. This includes calls to CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.", + // "flatPath": "v3/projects/{projectsId}/alertPolicies/{alertPoliciesId}", + // "httpMethod": "DELETE", + // "id": "monitoring.projects.alertPolicies.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The alerting policy to delete. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] For more information, see AlertPolicy.", + // "location": "path", + // "pattern": "^projects/[^/]+/alertPolicies/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v3/{+name}", + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring" + // ] + // } + +} + +// method id "monitoring.projects.alertPolicies.get": + +type ProjectsAlertPoliciesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets a single alerting policy. + +// - name: The alerting policy to retrieve. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]. +func (r *ProjectsAlertPoliciesService) Get(name string) *ProjectsAlertPoliciesGetCall { + c := &ProjectsAlertPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAlertPoliciesGetCall) Fields(s ...googleapi.Field) *ProjectsAlertPoliciesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsAlertPoliciesGetCall) IfNoneMatch(entityTag string) *ProjectsAlertPoliciesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled.
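IfNoneMatch sends the supplied tag as an If-None-Match request header, so an unchanged resource surfaces as an http.StatusNotModified error rather than a decoded body. A conditional-refresh sketch, assuming the server returns an Etag response header that was captured from an earlier Get (whether this particular API populates ETags is an assumption):

package example

import (
	"context"

	"google.golang.org/api/googleapi"
	monitoring "google.golang.org/api/monitoring/v3"
)

// refreshPolicy re-fetches a policy only if it has changed since the cached copy.
func refreshPolicy(ctx context.Context, svc *monitoring.Service, name string, cached *monitoring.AlertPolicy) (*monitoring.AlertPolicy, error) {
	etag := cached.ServerResponse.Header.Get("Etag") // captured from the earlier Get
	p, err := svc.Projects.AlertPolicies.Get(name).IfNoneMatch(etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		return cached, nil // 304: the cached copy is still current
	}
	if err != nil {
		return nil, err
	}
	return p, nil
}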
+func (c *ProjectsAlertPoliciesGetCall) Context(ctx context.Context) *ProjectsAlertPoliciesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAlertPoliciesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAlertPoliciesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "monitoring.projects.alertPolicies.get" call. +// Exactly one of *AlertPolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AlertPolicy.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsAlertPoliciesGetCall) Do(opts ...googleapi.CallOption) (*AlertPolicy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AlertPolicy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a single alerting policy.", + // "flatPath": "v3/projects/{projectsId}/alertPolicies/{alertPoliciesId}", + // "httpMethod": "GET", + // "id": "monitoring.projects.alertPolicies.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The alerting policy to retrieve. 
The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] ", + // "location": "path", + // "pattern": "^projects/[^/]+/alertPolicies/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v3/{+name}", + // "response": { + // "$ref": "AlertPolicy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.read" + // ] + // } + +} + +// method id "monitoring.projects.alertPolicies.list": + +type ProjectsAlertPoliciesListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the existing alerting policies for the workspace. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) whose +// alert policies are to be listed. The format is: +// projects/[PROJECT_ID_OR_NUMBER] Note that this field names the +// parent container in which the alerting policies to be listed are +// stored. To retrieve a single alerting policy by name, use the +// GetAlertPolicy operation, instead. +func (r *ProjectsAlertPoliciesService) List(name string) *ProjectsAlertPoliciesListCall { + c := &ProjectsAlertPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Filter sets the optional parameter "filter": If provided, this field +// specifies the criteria that must be met by alert policies to be +// included in the response.For more details, see sorting and filtering +// (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). +func (c *ProjectsAlertPoliciesListCall) Filter(filter string) *ProjectsAlertPoliciesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// OrderBy sets the optional parameter "orderBy": A comma-separated list +// of fields by which to sort the result. Supports the same set of field +// references as the filter field. Entries can be prefixed with a minus +// sign to sort by the field in descending order.For more details, see +// sorting and filtering +// (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). +func (c *ProjectsAlertPoliciesListCall) OrderBy(orderBy string) *ProjectsAlertPoliciesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of results to return in a single response. +func (c *ProjectsAlertPoliciesListCall) PageSize(pageSize int64) *ProjectsAlertPoliciesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If this field is +// not empty then it must contain the nextPageToken value returned by a +// previous call to this method. Using this field causes the method to +// return more results from the previous method call. +func (c *ProjectsAlertPoliciesListCall) PageToken(pageToken string) *ProjectsAlertPoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
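Filter, OrderBy, PageSize, and PageToken are plain query parameters on the list call, so they compose with the same builder chain. A single-page sketch, with a placeholder project and a placeholder filter in the API's sorting-and-filtering syntax:

package example

import (
	"context"
	"log"

	monitoring "google.golang.org/api/monitoring/v3"
)

// listMatchingPolicies fetches one page of filtered, sorted results.
func listMatchingPolicies(ctx context.Context, svc *monitoring.Service) error {
	resp, err := svc.Projects.AlertPolicies.
		List("projects/my-project").               // placeholder project
		Filter(`display_name = "example-policy"`). // placeholder filter expression
		OrderBy("display_name").
		PageSize(50).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, p := range resp.AlertPolicies {
		log.Printf("%s enabled=%v", p.Name, p.Enabled)
	}
	// A non-empty resp.NextPageToken feeds PageToken on the next call.
	return nil
}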
+func (c *ProjectsAlertPoliciesListCall) Fields(s ...googleapi.Field) *ProjectsAlertPoliciesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsAlertPoliciesListCall) IfNoneMatch(entityTag string) *ProjectsAlertPoliciesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAlertPoliciesListCall) Context(ctx context.Context) *ProjectsAlertPoliciesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAlertPoliciesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAlertPoliciesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/alertPolicies") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "monitoring.projects.alertPolicies.list" call. +// Exactly one of *ListAlertPoliciesResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ListAlertPoliciesResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAlertPoliciesListCall) Do(opts ...googleapi.CallOption) (*ListAlertPoliciesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ListAlertPoliciesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the existing alerting policies for the workspace.", + // "flatPath": "v3/projects/{projectsId}/alertPolicies", + // "httpMethod": "GET", + // "id": "monitoring.projects.alertPolicies.list", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "filter": { + // "description": "If provided, this field specifies the criteria that must be met by alert policies to be included in the response.For more details, see sorting and filtering (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) whose alert policies are to be listed. The format is: projects/[PROJECT_ID_OR_NUMBER] Note that this field names the parent container in which the alerting policies to be listed are stored. To retrieve a single alerting policy by name, use the GetAlertPolicy operation, instead.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "orderBy": { + // "description": "A comma-separated list of fields by which to sort the result. Supports the same set of field references as the filter field. Entries can be prefixed with a minus sign to sort by the field in descending order.For more details, see sorting and filtering (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "The maximum number of results to return in a single response.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return more results from the previous method call.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v3/{+name}/alertPolicies", + // "response": { + // "$ref": "ListAlertPoliciesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.read" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
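Pages simply drives Do in a loop, feeding each page to the callback and following NextPageToken until it comes back empty; the deferred PageToken call restores whatever token the caller had set. A sketch of walking every page, again with a placeholder project:

package example

import (
	"context"
	"log"

	monitoring "google.golang.org/api/monitoring/v3"
)

// printAllPolicies visits every page of results for the project.
func printAllPolicies(ctx context.Context, svc *monitoring.Service) error {
	return svc.Projects.AlertPolicies.
		List("projects/my-project"). // placeholder project
		PageSize(100).
		Pages(ctx, func(page *monitoring.ListAlertPoliciesResponse) error {
			for _, p := range page.AlertPolicies {
				log.Println(p.Name)
			}
			return nil // a non-nil error here halts the iteration
		})
}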
+func (c *ProjectsAlertPoliciesListCall) Pages(ctx context.Context, f func(*ListAlertPoliciesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "monitoring.projects.alertPolicies.patch": + +type ProjectsAlertPoliciesPatchCall struct { + s *Service + name string + alertpolicy *AlertPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an alerting policy. You can either replace the entire +// policy with a new one or replace only certain fields in the current +// alerting policy by specifying the fields to be updated via +// updateMask. Returns the updated alerting policy.Design your +// application to single-thread API calls that modify the state of +// alerting policies in a single project. This includes calls to +// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy. +// +// - name: Required if the policy exists. The resource name for this +// policy. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] +// [ALERT_POLICY_ID] is assigned by Cloud Monitoring when the policy +// is created. When calling the alertPolicies.create method, do not +// include the name field in the alerting policy passed as part of the +// request. +func (r *ProjectsAlertPoliciesService) Patch(name string, alertpolicy *AlertPolicy) *ProjectsAlertPoliciesPatchCall { + c := &ProjectsAlertPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.alertpolicy = alertpolicy + return c +} + +// UpdateMask sets the optional parameter "updateMask": A list of +// alerting policy field names. If this field is not empty, each listed +// field in the existing alerting policy is set to the value of the +// corresponding field in the supplied policy (alert_policy), or to the +// field's default value if the field is not in the supplied alerting +// policy. Fields not listed retain their previous value.Examples of +// valid field masks include display_name, documentation, +// documentation.content, documentation.mime_type, user_labels, +// user_label.nameofkey, enabled, conditions, combiner, etc.If this +// field is empty, then the supplied alerting policy replaces the +// existing policy. It is the same as deleting the existing policy and +// adding the supplied policy, except for the following: The new policy +// will have the same [ALERT_POLICY_ID] as the former policy. This gives +// you continuity with the former policy in your notifications and +// incidents. Conditions in the new policy will keep their former +// [CONDITION_ID] if the supplied condition includes the name field with +// that [CONDITION_ID]. If the supplied condition omits the name field, +// then a new [CONDITION_ID] is created. +func (c *ProjectsAlertPoliciesPatchCall) UpdateMask(updateMask string) *ProjectsAlertPoliciesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
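Because updateMask controls which fields are read from the supplied policy, zero values such as false have to be forced onto the wire; the generated structs expose ForceSendFields for exactly that. A sketch that disables a policy in place, with the policy name as a placeholder:

package example

import (
	"context"

	monitoring "google.golang.org/api/monitoring/v3"
)

// disablePolicy turns a policy off without touching its other fields.
func disablePolicy(ctx context.Context, svc *monitoring.Service, name string) (*monitoring.AlertPolicy, error) {
	patch := &monitoring.AlertPolicy{
		Enabled:         false,
		ForceSendFields: []string{"Enabled"}, // make sure the false value is serialized
	}
	return svc.Projects.AlertPolicies.
		Patch(name, patch).
		UpdateMask("enabled").
		Context(ctx).
		Do()
}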
+func (c *ProjectsAlertPoliciesPatchCall) Fields(s ...googleapi.Field) *ProjectsAlertPoliciesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAlertPoliciesPatchCall) Context(ctx context.Context) *ProjectsAlertPoliciesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAlertPoliciesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAlertPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.alertpolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "monitoring.projects.alertPolicies.patch" call. +// Exactly one of *AlertPolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AlertPolicy.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsAlertPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*AlertPolicy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &AlertPolicy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an alerting policy. You can either replace the entire policy with a new one or replace only certain fields in the current alerting policy by specifying the fields to be updated via updateMask. Returns the updated alerting policy.Design your application to single-thread API calls that modify the state of alerting policies in a single project. 
This includes calls to CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.", + // "flatPath": "v3/projects/{projectsId}/alertPolicies/{alertPoliciesId}", + // "httpMethod": "PATCH", + // "id": "monitoring.projects.alertPolicies.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required if the policy exists. The resource name for this policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] [ALERT_POLICY_ID] is assigned by Cloud Monitoring when the policy is created. When calling the alertPolicies.create method, do not include the name field in the alerting policy passed as part of the request.", + // "location": "path", + // "pattern": "^projects/[^/]+/alertPolicies/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Optional. A list of alerting policy field names. If this field is not empty, each listed field in the existing alerting policy is set to the value of the corresponding field in the supplied policy (alert_policy), or to the field's default value if the field is not in the supplied alerting policy. Fields not listed retain their previous value.Examples of valid field masks include display_name, documentation, documentation.content, documentation.mime_type, user_labels, user_label.nameofkey, enabled, conditions, combiner, etc.If this field is empty, then the supplied alerting policy replaces the existing policy. It is the same as deleting the existing policy and adding the supplied policy, except for the following: The new policy will have the same [ALERT_POLICY_ID] as the former policy. This gives you continuity with the former policy in your notifications and incidents. Conditions in the new policy will keep their former [CONDITION_ID] if the supplied condition includes the name field with that [CONDITION_ID]. If the supplied condition omits the name field, then a new [CONDITION_ID] is created.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v3/{+name}", + // "request": { + // "$ref": "AlertPolicy" + // }, + // "response": { + // "$ref": "AlertPolicy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/monitoring" + // ] + // } + +} + +// method id "monitoring.projects.collectdTimeSeries.create": + +type ProjectsCollectdTimeSeriesCreateCall struct { + s *Service + name string + createcollectdtimeseriesrequest *CreateCollectdTimeSeriesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Cloud Monitoring Agent only: Creates a new time series.This +// method is only for use by the Cloud Monitoring Agent. Use +// projects.timeSeries.create instead. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) in which +// to create the time series. The format is: +// projects/[PROJECT_ID_OR_NUMBER]. +func (r *ProjectsCollectdTimeSeriesService) Create(name string, createcollectdtimeseriesrequest *CreateCollectdTimeSeriesRequest) *ProjectsCollectdTimeSeriesCreateCall { + c := &ProjectsCollectdTimeSeriesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.alertpolicy = alertpolicy + c.createcollectdtimeseriesrequest = createcollectdtimeseriesrequest return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsAlertPoliciesCreateCall) Fields(s ...googleapi.Field) *ProjectsAlertPoliciesCreateCall { +func (c *ProjectsCollectdTimeSeriesCreateCall) Fields(s ...googleapi.Field) *ProjectsCollectdTimeSeriesCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5601,36 +9542,36 @@ func (c *ProjectsAlertPoliciesCreateCall) Fields(s ...googleapi.Field) *Projects // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsAlertPoliciesCreateCall) Context(ctx context.Context) *ProjectsAlertPoliciesCreateCall { +func (c *ProjectsCollectdTimeSeriesCreateCall) Context(ctx context.Context) *ProjectsCollectdTimeSeriesCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsAlertPoliciesCreateCall) Header() http.Header { +func (c *ProjectsCollectdTimeSeriesCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsAlertPoliciesCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsCollectdTimeSeriesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.alertpolicy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.createcollectdtimeseriesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/alertPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/collectdTimeSeries") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -5643,33 +9584,33 @@ func (c *ProjectsAlertPoliciesCreateCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.alertPolicies.create" call. -// Exactly one of *AlertPolicy or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AlertPolicy.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsAlertPoliciesCreateCall) Do(opts ...googleapi.CallOption) (*AlertPolicy, error) { +// Do executes the "monitoring.projects.collectdTimeSeries.create" call. +// Exactly one of *CreateCollectdTimeSeriesResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *CreateCollectdTimeSeriesResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
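Note that in this version every non-2xx result is passed through gensupport.WrapError before being returned, so callers should unwrap with errors.As rather than a direct type assertion. A sketch of inspecting the HTTP status behind a failed call; the Get call here is just a stand-in:

package example

import (
	"context"
	"errors"
	"log"

	"google.golang.org/api/googleapi"
	monitoring "google.golang.org/api/monitoring/v3"
)

// describeFailure reports the HTTP status behind a failed call, if any.
func describeFailure(ctx context.Context, svc *monitoring.Service, name string) {
	_, err := svc.Projects.AlertPolicies.Get(name).Context(ctx).Do()
	var apiErr *googleapi.Error
	if errors.As(err, &apiErr) {
		log.Printf("HTTP %d from the API: %s", apiErr.Code, apiErr.Message)
		return
	}
	if err != nil {
		log.Printf("transport or decoding error: %v", err)
	}
}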
+func (c *ProjectsCollectdTimeSeriesCreateCall) Do(opts ...googleapi.CallOption) (*CreateCollectdTimeSeriesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &AlertPolicy{ + ret := &CreateCollectdTimeSeriesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5681,58 +9622,72 @@ func (c *ProjectsAlertPoliciesCreateCall) Do(opts ...googleapi.CallOption) (*Ale } return ret, nil // { - // "description": "Creates a new alerting policy.", - // "flatPath": "v3/projects/{projectsId}/alertPolicies", + // "description": "Cloud Monitoring Agent only: Creates a new time series.This method is only for use by the Cloud Monitoring Agent. Use projects.timeSeries.create instead.", + // "flatPath": "v3/projects/{projectsId}/collectdTimeSeries", // "httpMethod": "POST", - // "id": "monitoring.projects.alertPolicies.create", + // "id": "monitoring.projects.collectdTimeSeries.create", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The project in which to create the alerting policy. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\nNote that this field names the parent container in which the alerting policy will be written, not the name of the created policy. |name| must be a host project of a workspace, otherwise INVALID_ARGUMENT error will return. The alerting policy that is returned will have a name that contains a normalized representation of this name as a prefix but adds a suffix of the form /alertPolicies/[ALERT_POLICY_ID], identifying the policy in the container.", + // "description": "The project (https://cloud.google.com/monitoring/api/v3#project_name) in which to create the time series. The format is: projects/[PROJECT_ID_OR_NUMBER] ", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v3/{+name}/alertPolicies", + // "path": "v3/{+name}/collectdTimeSeries", // "request": { - // "$ref": "AlertPolicy" + // "$ref": "CreateCollectdTimeSeriesRequest" // }, // "response": { - // "$ref": "AlertPolicy" + // "$ref": "CreateCollectdTimeSeriesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" + // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.write" // ] // } } -// method id "monitoring.projects.alertPolicies.delete": +// method id "monitoring.projects.groups.create": -type ProjectsAlertPoliciesDeleteCall struct { +type ProjectsGroupsCreateCall struct { s *Service name string + group *Group urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes an alerting policy. -func (r *ProjectsAlertPoliciesService) Delete(name string) *ProjectsAlertPoliciesDeleteCall { - c := &ProjectsAlertPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Create: Creates a new group. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) in which +// to create the group. 
The format is: projects/[PROJECT_ID_OR_NUMBER]. +func (r *ProjectsGroupsService) Create(name string, group *Group) *ProjectsGroupsCreateCall { + c := &ProjectsGroupsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name + c.group = group + return c +} + +// ValidateOnly sets the optional parameter "validateOnly": If true, +// validate this request but do not create the group. +func (c *ProjectsGroupsCreateCall) ValidateOnly(validateOnly bool) *ProjectsGroupsCreateCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsAlertPoliciesDeleteCall) Fields(s ...googleapi.Field) *ProjectsAlertPoliciesDeleteCall { +func (c *ProjectsGroupsCreateCall) Fields(s ...googleapi.Field) *ProjectsGroupsCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5740,33 +9695,38 @@ func (c *ProjectsAlertPoliciesDeleteCall) Fields(s ...googleapi.Field) *Projects // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsAlertPoliciesDeleteCall) Context(ctx context.Context) *ProjectsAlertPoliciesDeleteCall { +func (c *ProjectsGroupsCreateCall) Context(ctx context.Context) *ProjectsGroupsCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsAlertPoliciesDeleteCall) Header() http.Header { +func (c *ProjectsGroupsCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsAlertPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGroupsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/groups") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -5777,33 +9737,33 @@ func (c *ProjectsAlertPoliciesDeleteCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.alertPolicies.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// Do executes the "monitoring.projects.groups.create" call. +// Exactly one of *Group or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) +// *Group.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ProjectsAlertPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +func (c *ProjectsGroupsCreateCall) Do(opts ...googleapi.CallOption) (*Group, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Empty{ + ret := &Group{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5815,25 +9775,33 @@ func (c *ProjectsAlertPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Emp } return ret, nil // { - // "description": "Deletes an alerting policy.", - // "flatPath": "v3/projects/{projectsId}/alertPolicies/{alertPoliciesId}", - // "httpMethod": "DELETE", - // "id": "monitoring.projects.alertPolicies.delete", + // "description": "Creates a new group.", + // "flatPath": "v3/projects/{projectsId}/groups", + // "httpMethod": "POST", + // "id": "monitoring.projects.groups.create", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The alerting policy to delete. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]\nFor more information, see AlertPolicy.", + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) in which to create the group. The format is: projects/[PROJECT_ID_OR_NUMBER] ", // "location": "path", - // "pattern": "^projects/[^/]+/alertPolicies/[^/]+$", + // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, validate this request but do not create the group.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "v3/{+name}", + // "path": "v3/{+name}/groups", + // "request": { + // "$ref": "Group" + // }, // "response": { - // "$ref": "Empty" + // "$ref": "Group" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -5843,75 +9811,73 @@ func (c *ProjectsAlertPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Emp } -// method id "monitoring.projects.alertPolicies.get": +// method id "monitoring.projects.groups.delete": -type ProjectsAlertPoliciesGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsGroupsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets a single alerting policy. -func (r *ProjectsAlertPoliciesService) Get(name string) *ProjectsAlertPoliciesGetCall { - c := &ProjectsAlertPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes an existing group. +// +// - name: The group to delete. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]. 
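ValidateOnly gives a dry run of group creation, and Recursive (documented just below) controls whether deletion takes descendant groups with it. A create-then-delete sketch with placeholder project, display name, and group filter:

package example

import (
	"context"

	monitoring "google.golang.org/api/monitoring/v3"
)

// createAndDeleteGroup validates, creates, and then removes a group.
func createAndDeleteGroup(ctx context.Context, svc *monitoring.Service) error {
	parent := "projects/my-project" // placeholder
	group := &monitoring.Group{
		DisplayName: "example-group",                                    // placeholder
		Filter:      `resource.metadata.name = starts_with("example-")`, // placeholder group filter
	}
	// Dry run first: validate the request without creating anything.
	if _, err := svc.Projects.Groups.Create(parent, group).ValidateOnly(true).Context(ctx).Do(); err != nil {
		return err
	}
	created, err := svc.Projects.Groups.Create(parent, group).Context(ctx).Do()
	if err != nil {
		return err
	}
	// Recursive(false) refuses to delete a group that still has descendants.
	_, err = svc.Projects.Groups.Delete(created.Name).Recursive(false).Context(ctx).Do()
	return err
}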
+func (r *ProjectsGroupsService) Delete(name string) *ProjectsGroupsDeleteCall { + c := &ProjectsGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } +// Recursive sets the optional parameter "recursive": If this field is +// true, then the request means to delete a group with all its +// descendants. Otherwise, the request means to delete a group only when +// it has no descendants. The default value is false. +func (c *ProjectsGroupsDeleteCall) Recursive(recursive bool) *ProjectsGroupsDeleteCall { + c.urlParams_.Set("recursive", fmt.Sprint(recursive)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsAlertPoliciesGetCall) Fields(s ...googleapi.Field) *ProjectsAlertPoliciesGetCall { +func (c *ProjectsGroupsDeleteCall) Fields(s ...googleapi.Field) *ProjectsGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsAlertPoliciesGetCall) IfNoneMatch(entityTag string) *ProjectsAlertPoliciesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsAlertPoliciesGetCall) Context(ctx context.Context) *ProjectsAlertPoliciesGetCall { +func (c *ProjectsGroupsDeleteCall) Context(ctx context.Context) *ProjectsGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsAlertPoliciesGetCall) Header() http.Header { +func (c *ProjectsGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsAlertPoliciesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -5922,33 +9888,33 @@ func (c *ProjectsAlertPoliciesGetCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.alertPolicies.get" call. -// Exactly one of *AlertPolicy or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AlertPolicy.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsAlertPoliciesGetCall) Do(opts ...googleapi.CallOption) (*AlertPolicy, error) { +// Do executes the "monitoring.projects.groups.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &AlertPolicy{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5960,93 +9926,64 @@ func (c *ProjectsAlertPoliciesGetCall) Do(opts ...googleapi.CallOption) (*AlertP } return ret, nil // { - // "description": "Gets a single alerting policy.", - // "flatPath": "v3/projects/{projectsId}/alertPolicies/{alertPoliciesId}", - // "httpMethod": "GET", - // "id": "monitoring.projects.alertPolicies.get", + // "description": "Deletes an existing group.", + // "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", + // "httpMethod": "DELETE", + // "id": "monitoring.projects.groups.delete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The alerting policy to retrieve. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]\n", + // "description": "Required. The group to delete. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] ", // "location": "path", - // "pattern": "^projects/[^/]+/alertPolicies/[^/]+$", + // "pattern": "^projects/[^/]+/groups/[^/]+$", // "required": true, // "type": "string" + // }, + // "recursive": { + // "description": "If this field is true, then the request means to delete a group with all its descendants. Otherwise, the request means to delete a group only when it has no descendants. The default value is false.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "v3/{+name}", // "response": { - // "$ref": "AlertPolicy" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read" + // "https://www.googleapis.com/auth/monitoring" // ] // } } -// method id "monitoring.projects.alertPolicies.list": - -type ProjectsAlertPoliciesListCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists the existing alerting policies for the workspace. 
-func (r *ProjectsAlertPoliciesService) List(name string) *ProjectsAlertPoliciesListCall { - c := &ProjectsAlertPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Filter sets the optional parameter "filter": If provided, this field -// specifies the criteria that must be met by alert policies to be -// included in the response.For more details, see sorting and filtering -// (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). -func (c *ProjectsAlertPoliciesListCall) Filter(filter string) *ProjectsAlertPoliciesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// OrderBy sets the optional parameter "orderBy": A comma-separated list -// of fields by which to sort the result. Supports the same set of field -// references as the filter field. Entries can be prefixed with a minus -// sign to sort by the field in descending order.For more details, see -// sorting and filtering -// (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). -func (c *ProjectsAlertPoliciesListCall) OrderBy(orderBy string) *ProjectsAlertPoliciesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return in a single response. -func (c *ProjectsAlertPoliciesListCall) PageSize(pageSize int64) *ProjectsAlertPoliciesListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c +// method id "monitoring.projects.groups.get": + +type ProjectsGroupsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": If this field is -// not empty then it must contain the nextPageToken value returned by a -// previous call to this method. Using this field causes the method to -// return more results from the previous method call. -func (c *ProjectsAlertPoliciesListCall) PageToken(pageToken string) *ProjectsAlertPoliciesListCall { - c.urlParams_.Set("pageToken", pageToken) +// Get: Gets a single group. +// +// - name: The group to retrieve. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]. +func (r *ProjectsGroupsService) Get(name string) *ProjectsGroupsGetCall { + c := &ProjectsGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsAlertPoliciesListCall) Fields(s ...googleapi.Field) *ProjectsAlertPoliciesListCall { +func (c *ProjectsGroupsGetCall) Fields(s ...googleapi.Field) *ProjectsGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6056,7 +9993,7 @@ func (c *ProjectsAlertPoliciesListCall) Fields(s ...googleapi.Field) *ProjectsAl // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of If-None-Match. -func (c *ProjectsAlertPoliciesListCall) IfNoneMatch(entityTag string) *ProjectsAlertPoliciesListCall { +func (c *ProjectsGroupsGetCall) IfNoneMatch(entityTag string) *ProjectsGroupsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -6064,23 +10001,23 @@ func (c *ProjectsAlertPoliciesListCall) IfNoneMatch(entityTag string) *ProjectsA // Context sets the context to be used in this call's Do method.
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsAlertPoliciesListCall) Context(ctx context.Context) *ProjectsAlertPoliciesListCall { +func (c *ProjectsGroupsGetCall) Context(ctx context.Context) *ProjectsGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsAlertPoliciesListCall) Header() http.Header { +func (c *ProjectsGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsAlertPoliciesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -6091,7 +10028,7 @@ func (c *ProjectsAlertPoliciesListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/alertPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -6104,33 +10041,33 @@ func (c *ProjectsAlertPoliciesListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.alertPolicies.list" call. -// Exactly one of *ListAlertPoliciesResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *ListAlertPoliciesResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsAlertPoliciesListCall) Do(opts ...googleapi.CallOption) (*ListAlertPoliciesResponse, error) { +// Do executes the "monitoring.projects.groups.get" call. +// Exactly one of *Group or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Group.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGroupsGetCall) Do(opts ...googleapi.CallOption) (*Group, error) { gensupport.SetOptions(c.urlParams_, opts...) 
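	// When IfNoneMatch was set on this call and the server replies with
	// 304, doRequest returns that response here and Do surfaces it as a
	// *googleapi.Error whose Code is http.StatusNotModified.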
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ListAlertPoliciesResponse{ + ret := &Group{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6142,46 +10079,25 @@ func (c *ProjectsAlertPoliciesListCall) Do(opts ...googleapi.CallOption) (*ListA } return ret, nil // { - // "description": "Lists the existing alerting policies for the workspace.", - // "flatPath": "v3/projects/{projectsId}/alertPolicies", + // "description": "Gets a single group.", + // "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", // "httpMethod": "GET", - // "id": "monitoring.projects.alertPolicies.list", + // "id": "monitoring.projects.groups.get", // "parameterOrder": [ // "name" // ], // "parameters": { - // "filter": { - // "description": "If provided, this field specifies the criteria that must be met by alert policies to be included in the response.For more details, see sorting and filtering (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).", - // "location": "query", - // "type": "string" - // }, // "name": { - // "description": "Required. The project whose alert policies are to be listed. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\nNote that this field names the parent container in which the alerting policies to be listed are stored. To retrieve a single alerting policy by name, use the GetAlertPolicy operation, instead.", + // "description": "Required. The group to retrieve. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] ", // "location": "path", - // "pattern": "^projects/[^/]+$", + // "pattern": "^projects/[^/]+/groups/[^/]+$", // "required": true, // "type": "string" - // }, - // "orderBy": { - // "description": "A comma-separated list of fields by which to sort the result. Supports the same set of field references as the filter field. Entries can be prefixed with a minus sign to sort by the field in descending order.For more details, see sorting and filtering (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).", - // "location": "query", - // "type": "string" - // }, - // "pageSize": { - // "description": "The maximum number of results to return in a single response.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return more results from the previous method call.", - // "location": "query", - // "type": "string" // } // }, - // "path": "v3/{+name}/alertPolicies", + // "path": "v3/{+name}", // "response": { - // "$ref": "ListAlertPoliciesResponse" + // "$ref": "Group" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -6192,116 +10108,129 @@ func (c *ProjectsAlertPoliciesListCall) Do(opts ...googleapi.CallOption) (*ListA } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsAlertPoliciesListCall) Pages(ctx context.Context, f func(*ListAlertPoliciesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } +// method id "monitoring.projects.groups.list": + +type ProjectsGroupsListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// method id "monitoring.projects.alertPolicies.patch": +// List: Lists the existing groups. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) whose +// groups are to be listed. The format is: +// projects/[PROJECT_ID_OR_NUMBER]. +func (r *ProjectsGroupsService) List(name string) *ProjectsGroupsListCall { + c := &ProjectsGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} -type ProjectsAlertPoliciesPatchCall struct { - s *Service - name string - alertpolicy *AlertPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// AncestorsOfGroup sets the optional parameter "ancestorsOfGroup": A +// group name. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Returns groups that +// are ancestors of the specified group. The groups are returned in +// order, starting with the immediate parent and ending with the most +// distant ancestor. If the specified group has no immediate parent, the +// results are empty. +func (c *ProjectsGroupsListCall) AncestorsOfGroup(ancestorsOfGroup string) *ProjectsGroupsListCall { + c.urlParams_.Set("ancestorsOfGroup", ancestorsOfGroup) + return c } -// Patch: Updates an alerting policy. You can either replace the entire -// policy with a new one or replace only certain fields in the current -// alerting policy by specifying the fields to be updated via -// updateMask. Returns the updated alerting policy. -func (r *ProjectsAlertPoliciesService) Patch(name string, alertpolicy *AlertPolicy) *ProjectsAlertPoliciesPatchCall { - c := &ProjectsAlertPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.alertpolicy = alertpolicy +// ChildrenOfGroup sets the optional parameter "childrenOfGroup": A +// group name. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Returns groups +// whose parent_name field contains the group name. If no groups have +// this parent, the results are empty. +func (c *ProjectsGroupsListCall) ChildrenOfGroup(childrenOfGroup string) *ProjectsGroupsListCall { + c.urlParams_.Set("childrenOfGroup", childrenOfGroup) return c } -// UpdateMask sets the optional parameter "updateMask": A list of -// alerting policy field names. If this field is not empty, each listed -// field in the existing alerting policy is set to the value of the -// corresponding field in the supplied policy (alert_policy), or to the -// field's default value if the field is not in the supplied alerting -// policy. Fields not listed retain their previous value.Examples of -// valid field masks include display_name, documentation, -// documentation.content, documentation.mime_type, user_labels, -// user_label.nameofkey, enabled, conditions, combiner, etc.If this -// field is empty, then the supplied alerting policy replaces the -// existing policy. 
It is the same as deleting the existing policy and -// adding the supplied policy, except for the following: -// The new policy will have the same [ALERT_POLICY_ID] as the former -// policy. This gives you continuity with the former policy in your -// notifications and incidents. -// Conditions in the new policy will keep their former [CONDITION_ID] if -// the supplied condition includes the name field with that -// [CONDITION_ID]. If the supplied condition omits the name field, then -// a new [CONDITION_ID] is created. -func (c *ProjectsAlertPoliciesPatchCall) UpdateMask(updateMask string) *ProjectsAlertPoliciesPatchCall { - c.urlParams_.Set("updateMask", updateMask) +// DescendantsOfGroup sets the optional parameter "descendantsOfGroup": +// A group name. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Returns the +// descendants of the specified group. This is a superset of the results +// returned by the children_of_group filter, and includes +// children-of-children, and so forth. +func (c *ProjectsGroupsListCall) DescendantsOfGroup(descendantsOfGroup string) *ProjectsGroupsListCall { + c.urlParams_.Set("descendantsOfGroup", descendantsOfGroup) + return c +} + +// PageSize sets the optional parameter "pageSize": A positive number +// that is the maximum number of results to return. +func (c *ProjectsGroupsListCall) PageSize(pageSize int64) *ProjectsGroupsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If this field is +// not empty then it must contain the next_page_token value returned by +// a previous call to this method. Using this field causes the method to +// return additional results from the previous method call. +func (c *ProjectsGroupsListCall) PageToken(pageToken string) *ProjectsGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsAlertPoliciesPatchCall) Fields(s ...googleapi.Field) *ProjectsAlertPoliciesPatchCall { +func (c *ProjectsGroupsListCall) Fields(s ...googleapi.Field) *ProjectsGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsGroupsListCall) IfNoneMatch(entityTag string) *ProjectsGroupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsAlertPoliciesPatchCall) Context(ctx context.Context) *ProjectsAlertPoliciesPatchCall { +func (c *ProjectsGroupsListCall) Context(ctx context.Context) *ProjectsGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
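// For example, a caller might attach a quota-project header before
// invoking Do (a hedged sketch; X-Goog-User-Project is a general
// Google API convention and is not required by this package):
//
//	call.Header().Set("X-Goog-User-Project", "my-quota-project")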
-func (c *ProjectsAlertPoliciesPatchCall) Header() http.Header { +func (c *ProjectsGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsAlertPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.alertpolicy) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/groups") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -6312,33 +10241,33 @@ func (c *ProjectsAlertPoliciesPatchCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.alertPolicies.patch" call. -// Exactly one of *AlertPolicy or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AlertPolicy.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsAlertPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*AlertPolicy, error) { +// Do executes the "monitoring.projects.groups.list" call. +// Exactly one of *ListGroupsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListGroupsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsGroupsListCall) Do(opts ...googleapi.CallOption) (*ListGroupsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &AlertPolicy{ + ret := &ListGroupsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6350,69 +10279,119 @@ func (c *ProjectsAlertPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Aler } return ret, nil // { - // "description": "Updates an alerting policy. 
You can either replace the entire policy with a new one or replace only certain fields in the current alerting policy by specifying the fields to be updated via updateMask. Returns the updated alerting policy.", - // "flatPath": "v3/projects/{projectsId}/alertPolicies/{alertPoliciesId}", - // "httpMethod": "PATCH", - // "id": "monitoring.projects.alertPolicies.patch", + // "description": "Lists the existing groups.", + // "flatPath": "v3/projects/{projectsId}/groups", + // "httpMethod": "GET", + // "id": "monitoring.projects.groups.list", // "parameterOrder": [ // "name" // ], // "parameters": { + // "ancestorsOfGroup": { + // "description": "A group name. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Returns groups that are ancestors of the specified group. The groups are returned in order, starting with the immediate parent and ending with the most distant ancestor. If the specified group has no immediate parent, the results are empty.", + // "location": "query", + // "type": "string" + // }, + // "childrenOfGroup": { + // "description": "A group name. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Returns groups whose parent_name field contains the group name. If no groups have this parent, the results are empty.", + // "location": "query", + // "type": "string" + // }, + // "descendantsOfGroup": { + // "description": "A group name. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] Returns the descendants of the specified group. This is a superset of the results returned by the children_of_group filter, and includes children-of-children, and so forth.", + // "location": "query", + // "type": "string" + // }, // "name": { - // "description": "Required if the policy exists. The resource name for this policy. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]\n[ALERT_POLICY_ID] is assigned by Stackdriver Monitoring when the policy is created. When calling the alertPolicies.create method, do not include the name field in the alerting policy passed as part of the request.", + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) whose groups are to be listed. The format is: projects/[PROJECT_ID_OR_NUMBER] ", // "location": "path", - // "pattern": "^projects/[^/]+/alertPolicies/[^/]+$", + // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // }, - // "updateMask": { - // "description": "Optional. A list of alerting policy field names. If this field is not empty, each listed field in the existing alerting policy is set to the value of the corresponding field in the supplied policy (alert_policy), or to the field's default value if the field is not in the supplied alerting policy. Fields not listed retain their previous value.Examples of valid field masks include display_name, documentation, documentation.content, documentation.mime_type, user_labels, user_label.nameofkey, enabled, conditions, combiner, etc.If this field is empty, then the supplied alerting policy replaces the existing policy. It is the same as deleting the existing policy and adding the supplied policy, except for the following:\nThe new policy will have the same [ALERT_POLICY_ID] as the former policy. This gives you continuity with the former policy in your notifications and incidents.\nConditions in the new policy will keep their former [CONDITION_ID] if the supplied condition includes the name field with that [CONDITION_ID]. 
If the supplied condition omits the name field, then a new [CONDITION_ID] is created.", - // "format": "google-fieldmask", + // "pageSize": { + // "description": "A positive number that is the maximum number of results to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "If this field is not empty then it must contain the next_page_token value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", // "location": "query", // "type": "string" // } // }, - // "path": "v3/{+name}", - // "request": { - // "$ref": "AlertPolicy" - // }, + // "path": "v3/{+name}/groups", // "response": { - // "$ref": "AlertPolicy" + // "$ref": "ListGroupsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" + // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.read" // ] // } } -// method id "monitoring.projects.collectdTimeSeries.create": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsGroupsListCall) Pages(ctx context.Context, f func(*ListGroupsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsCollectdTimeSeriesCreateCall struct { - s *Service - name string - createcollectdtimeseriesrequest *CreateCollectdTimeSeriesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "monitoring.projects.groups.update": + +type ProjectsGroupsUpdateCall struct { + s *Service + name string + group *Group + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Create: Stackdriver Monitoring Agent only: Creates a new time -// series. -func (r *ProjectsCollectdTimeSeriesService) Create(name string, createcollectdtimeseriesrequest *CreateCollectdTimeSeriesRequest) *ProjectsCollectdTimeSeriesCreateCall { - c := &ProjectsCollectdTimeSeriesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates an existing group. You can change any group +// attributes except name. +// +// - name: Output only. The name of this group. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] When creating a +// group, this field is ignored and a new name is created consisting +// of the project specified in the call to CreateGroup and a unique +// [GROUP_ID] that is generated automatically. +func (r *ProjectsGroupsService) Update(name string, group *Group) *ProjectsGroupsUpdateCall { + c := &ProjectsGroupsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.createcollectdtimeseriesrequest = createcollectdtimeseriesrequest + c.group = group + return c +} + +// ValidateOnly sets the optional parameter "validateOnly": If true, +// validate this request but do not update the existing group. +func (c *ProjectsGroupsUpdateCall) ValidateOnly(validateOnly bool) *ProjectsGroupsUpdateCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsCollectdTimeSeriesCreateCall) Fields(s ...googleapi.Field) *ProjectsCollectdTimeSeriesCreateCall { +func (c *ProjectsGroupsUpdateCall) Fields(s ...googleapi.Field) *ProjectsGroupsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6420,38 +10399,38 @@ func (c *ProjectsCollectdTimeSeriesCreateCall) Fields(s ...googleapi.Field) *Pro // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsCollectdTimeSeriesCreateCall) Context(ctx context.Context) *ProjectsCollectdTimeSeriesCreateCall { +func (c *ProjectsGroupsUpdateCall) Context(ctx context.Context) *ProjectsGroupsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsCollectdTimeSeriesCreateCall) Header() http.Header { +func (c *ProjectsGroupsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsCollectdTimeSeriesCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGroupsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.createcollectdtimeseriesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/collectdTimeSeries") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } @@ -6462,33 +10441,33 @@ func (c *ProjectsCollectdTimeSeriesCreateCall) doRequest(alt string) (*http.Resp return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.collectdTimeSeries.create" call. -// Exactly one of *CreateCollectdTimeSeriesResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *CreateCollectdTimeSeriesResponse.ServerResponse.Header or (if -// a response was returned at all) in error.(*googleapi.Error).Header. -// Use googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsCollectdTimeSeriesCreateCall) Do(opts ...googleapi.CallOption) (*CreateCollectdTimeSeriesResponse, error) { +// Do executes the "monitoring.projects.groups.update" call. +// Exactly one of *Group or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Group.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
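+// A minimal usage sketch (assumptions: svc is a *Service built with
+// this package's NewService constructor, and g is a *Group fetched
+// earlier; both names are illustrative):
+//
+//	g.DisplayName = "renamed group"
+//	updated, err := svc.Projects.Groups.Update(g.Name, g).Do()
+//	if err != nil {
+//		// handle the error; see *googleapi.Error for details
+//	}
+//	_ = updated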
+func (c *ProjectsGroupsUpdateCall) Do(opts ...googleapi.CallOption) (*Group, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &CreateCollectdTimeSeriesResponse{ + ret := &Group{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6500,107 +10479,158 @@ func (c *ProjectsCollectdTimeSeriesCreateCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Stackdriver Monitoring Agent only: Creates a new time series.\u003caside class=\"caution\"\u003eThis method is only for use by the Stackdriver Monitoring Agent. Use projects.timeSeries.create instead.\u003c/aside\u003e", - // "flatPath": "v3/projects/{projectsId}/collectdTimeSeries", - // "httpMethod": "POST", - // "id": "monitoring.projects.collectdTimeSeries.create", + // "description": "Updates an existing group. You can change any group attributes except name.", + // "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", + // "httpMethod": "PUT", + // "id": "monitoring.projects.groups.update", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "The project in which to create the time series. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + // "description": "Output only. The name of this group. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique [GROUP_ID] that is generated automatically.", // "location": "path", - // "pattern": "^projects/[^/]+$", + // "pattern": "^projects/[^/]+/groups/[^/]+$", // "required": true, // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, validate this request but do not update the existing group.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "v3/{+name}/collectdTimeSeries", + // "path": "v3/{+name}", // "request": { - // "$ref": "CreateCollectdTimeSeriesRequest" + // "$ref": "Group" // }, // "response": { - // "$ref": "CreateCollectdTimeSeriesResponse" + // "$ref": "Group" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.write" + // "https://www.googleapis.com/auth/monitoring" // ] // } } -// method id "monitoring.projects.groups.create": +// method id "monitoring.projects.groups.members.list": -type ProjectsGroupsCreateCall struct { - s *Service - name string - group *Group - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsGroupsMembersListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Create: Creates a new group. -func (r *ProjectsGroupsService) Create(name string, group *Group) *ProjectsGroupsCreateCall { - c := &ProjectsGroupsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists the monitored resources that are members of a group. 
+// +// - name: The group whose members are listed. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]. +func (r *ProjectsGroupsMembersService) List(name string) *ProjectsGroupsMembersListCall { + c := &ProjectsGroupsMembersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.group = group return c } -// ValidateOnly sets the optional parameter "validateOnly": If true, -// validate this request but do not create the group. -func (c *ProjectsGroupsCreateCall) ValidateOnly(validateOnly bool) *ProjectsGroupsCreateCall { - c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) +// Filter sets the optional parameter "filter": An optional list filter +// (https://cloud.google.com/monitoring/api/learn_more#filtering) +// describing the members to be returned. The filter may reference the +// type, labels, and metadata of monitored resources that comprise the +// group. For example, to return only resources representing Compute +// Engine VM instances, use this filter: `resource.type = +// "gce_instance" +func (c *ProjectsGroupsMembersListCall) Filter(filter string) *ProjectsGroupsMembersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IntervalEndTime sets the optional parameter "interval.endTime": +// Required. The end of the time interval. +func (c *ProjectsGroupsMembersListCall) IntervalEndTime(intervalEndTime string) *ProjectsGroupsMembersListCall { + c.urlParams_.Set("interval.endTime", intervalEndTime) + return c +} + +// IntervalStartTime sets the optional parameter "interval.startTime": +// The beginning of the time interval. The default value for the start +// time is the end time. The start time must not be later than the end +// time. +func (c *ProjectsGroupsMembersListCall) IntervalStartTime(intervalStartTime string) *ProjectsGroupsMembersListCall { + c.urlParams_.Set("interval.startTime", intervalStartTime) + return c +} + +// PageSize sets the optional parameter "pageSize": A positive number +// that is the maximum number of results to return. +func (c *ProjectsGroupsMembersListCall) PageSize(pageSize int64) *ProjectsGroupsMembersListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If this field is +// not empty then it must contain the next_page_token value returned by +// a previous call to this method. Using this field causes the method to +// return additional results from the previous method call. +func (c *ProjectsGroupsMembersListCall) PageToken(pageToken string) *ProjectsGroupsMembersListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGroupsCreateCall) Fields(s ...googleapi.Field) *ProjectsGroupsCreateCall { +func (c *ProjectsGroupsMembersListCall) Fields(s ...googleapi.Field) *ProjectsGroupsMembersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
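+// For instance (a hedged sketch; etag would be taken from a previous
+// response's ServerResponse.Header):
+//
+//	_, err := c.IfNoneMatch(etag).Do()
+//	if googleapi.IsNotModified(err) {
+//		// the cached copy is still current
+//	}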
+func (c *ProjectsGroupsMembersListCall) IfNoneMatch(entityTag string) *ProjectsGroupsMembersListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGroupsCreateCall) Context(ctx context.Context) *ProjectsGroupsCreateCall { +func (c *ProjectsGroupsMembersListCall) Context(ctx context.Context) *ProjectsGroupsMembersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGroupsCreateCall) Header() http.Header { +func (c *ProjectsGroupsMembersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGroupsCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGroupsMembersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/groups") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/members") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -6611,33 +10641,33 @@ func (c *ProjectsGroupsCreateCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.groups.create" call. -// Exactly one of *Group or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Group.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGroupsCreateCall) Do(opts ...googleapi.CallOption) (*Group, error) { +// Do executes the "monitoring.projects.groups.members.list" call. +// Exactly one of *ListGroupMembersResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *ListGroupMembersResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsGroupsMembersListCall) Do(opts ...googleapi.CallOption) (*ListGroupMembersResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
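	// Standard generated Do flow: send the request, convert a 304 and
	// any other non-2xx status into a *googleapi.Error (wrapped by
	// gensupport.WrapError in this version so callers can use errors.As),
	// then decode the JSON body into the typed response.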
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Group{ + ret := &ListGroupMembersResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6649,72 +10679,116 @@ func (c *ProjectsGroupsCreateCall) Do(opts ...googleapi.CallOption) (*Group, err } return ret, nil // { - // "description": "Creates a new group.", - // "flatPath": "v3/projects/{projectsId}/groups", - // "httpMethod": "POST", - // "id": "monitoring.projects.groups.create", + // "description": "Lists the monitored resources that are members of a group.", + // "flatPath": "v3/projects/{projectsId}/groups/{groupsId}/members", + // "httpMethod": "GET", + // "id": "monitoring.projects.groups.members.list", // "parameterOrder": [ // "name" // ], // "parameters": { + // "filter": { + // "description": "An optional list filter (https://cloud.google.com/monitoring/api/learn_more#filtering) describing the members to be returned. The filter may reference the type, labels, and metadata of monitored resources that comprise the group. For example, to return only resources representing Compute Engine VM instances, use this filter: `resource.type = \"gce_instance\"` ", + // "location": "query", + // "type": "string" + // }, + // "interval.endTime": { + // "description": "Required. The end of the time interval.", + // "format": "google-datetime", + // "location": "query", + // "type": "string" + // }, + // "interval.startTime": { + // "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", + // "format": "google-datetime", + // "location": "query", + // "type": "string" + // }, // "name": { - // "description": "Required. The project in which to create the group. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + // "description": "Required. The group whose members are listed. The format is: projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] ", // "location": "path", - // "pattern": "^projects/[^/]+$", + // "pattern": "^projects/[^/]+/groups/[^/]+$", // "required": true, // "type": "string" // }, - // "validateOnly": { - // "description": "If true, validate this request but do not create the group.", + // "pageSize": { + // "description": "A positive number that is the maximum number of results to return.", + // "format": "int32", // "location": "query", - // "type": "boolean" + // "type": "integer" + // }, + // "pageToken": { + // "description": "If this field is not empty then it must contain the next_page_token value returned by a previous call to this method. 
Using this field causes the method to return additional results from the previous method call.", + // "location": "query", + // "type": "string" // } // }, - // "path": "v3/{+name}/groups", - // "request": { - // "$ref": "Group" - // }, + // "path": "v3/{+name}/members", // "response": { - // "$ref": "Group" + // "$ref": "ListGroupMembersResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" + // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.read" // ] // } } -// method id "monitoring.projects.groups.delete": - -type ProjectsGroupsDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsGroupsMembersListCall) Pages(ctx context.Context, f func(*ListGroupMembersResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// Delete: Deletes an existing group. -func (r *ProjectsGroupsService) Delete(name string) *ProjectsGroupsDeleteCall { - c := &ProjectsGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c +// method id "monitoring.projects.metricDescriptors.create": + +type ProjectsMetricDescriptorsCreateCall struct { + s *Service + name string + metricdescriptor *MetricDescriptor + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Recursive sets the optional parameter "recursive": If this field is -// true, then the request means to delete a group with all its -// descendants. Otherwise, the request means to delete a group only when -// it has no descendants. The default value is false. -func (c *ProjectsGroupsDeleteCall) Recursive(recursive bool) *ProjectsGroupsDeleteCall { - c.urlParams_.Set("recursive", fmt.Sprint(recursive)) +// Create: Creates a new metric descriptor. The creation is executed +// asynchronously. User-created metric descriptors define custom metrics +// (https://cloud.google.com/monitoring/custom-metrics). The metric +// descriptor is updated if it already exists, except that metric labels +// are never removed. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) on which +// to execute the request. The format is: 4 +// projects/PROJECT_ID_OR_NUMBER. +func (r *ProjectsMetricDescriptorsService) Create(name string, metricdescriptor *MetricDescriptor) *ProjectsMetricDescriptorsCreateCall { + c := &ProjectsMetricDescriptorsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.metricdescriptor = metricdescriptor return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ProjectsGroupsDeleteCall) Fields(s ...googleapi.Field) *ProjectsGroupsDeleteCall { +func (c *ProjectsMetricDescriptorsCreateCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6722,33 +10796,38 @@ func (c *ProjectsGroupsDeleteCall) Fields(s ...googleapi.Field) *ProjectsGroupsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGroupsDeleteCall) Context(ctx context.Context) *ProjectsGroupsDeleteCall { +func (c *ProjectsMetricDescriptorsCreateCall) Context(ctx context.Context) *ProjectsMetricDescriptorsCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGroupsDeleteCall) Header() http.Header { +func (c *ProjectsMetricDescriptorsCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsMetricDescriptorsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metricdescriptor) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/metricDescriptors") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -6759,33 +10838,33 @@ func (c *ProjectsGroupsDeleteCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.groups.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +// Do executes the "monitoring.projects.metricDescriptors.create" call. +// Exactly one of *MetricDescriptor or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *MetricDescriptor.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsMetricDescriptorsCreateCall) Do(opts ...googleapi.CallOption) (*MetricDescriptor, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Empty{ + ret := &MetricDescriptor{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6797,53 +10876,57 @@ func (c *ProjectsGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, err } return ret, nil // { - // "description": "Deletes an existing group.", - // "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", - // "httpMethod": "DELETE", - // "id": "monitoring.projects.groups.delete", + // "description": "Creates a new metric descriptor. The creation is executed asynchronously. User-created metric descriptors define custom metrics (https://cloud.google.com/monitoring/custom-metrics). The metric descriptor is updated if it already exists, except that metric labels are never removed.", + // "flatPath": "v3/projects/{projectsId}/metricDescriptors", + // "httpMethod": "POST", + // "id": "monitoring.projects.metricDescriptors.create", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The group to delete. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\n", + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: 4 projects/PROJECT_ID_OR_NUMBER", // "location": "path", - // "pattern": "^projects/[^/]+/groups/[^/]+$", + // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" - // }, - // "recursive": { - // "description": "If this field is true, then the request means to delete a group with all its descendants. Otherwise, the request means to delete a group only when it has no descendants. The default value is false.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "v3/{+name}", + // "path": "v3/{+name}/metricDescriptors", + // "request": { + // "$ref": "MetricDescriptor" + // }, // "response": { - // "$ref": "Empty" + // "$ref": "MetricDescriptor" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" + // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.write" // ] // } } -// method id "monitoring.projects.groups.get": +// method id "monitoring.projects.metricDescriptors.delete": -type ProjectsGroupsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsMetricDescriptorsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets a single group. -func (r *ProjectsGroupsService) Get(name string) *ProjectsGroupsGetCall { - c := &ProjectsGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes a metric descriptor. Only user-created custom metrics +// (https://cloud.google.com/monitoring/custom-metrics) can be deleted. +// +// - name: The metric descriptor on which to execute the request. 
The +// format is: +// projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] An +// example of [METRIC_ID] is: "custom.googleapis.com/my_test_metric". +func (r *ProjectsMetricDescriptorsService) Delete(name string) *ProjectsMetricDescriptorsDeleteCall { + c := &ProjectsMetricDescriptorsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } @@ -6851,54 +10934,41 @@ func (r *ProjectsGroupsService) Get(name string) *ProjectsGroupsGetCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGroupsGetCall) Fields(s ...googleapi.Field) *ProjectsGroupsGetCall { +func (c *ProjectsMetricDescriptorsDeleteCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGroupsGetCall) IfNoneMatch(entityTag string) *ProjectsGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGroupsGetCall) Context(ctx context.Context) *ProjectsGroupsGetCall { +func (c *ProjectsMetricDescriptorsDeleteCall) Context(ctx context.Context) *ProjectsMetricDescriptorsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGroupsGetCall) Header() http.Header { +func (c *ProjectsMetricDescriptorsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsMetricDescriptorsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -6909,33 +10979,33 @@ func (c *ProjectsGroupsGetCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.groups.get" call. -// Exactly one of *Group or error will be non-nil. Any non-2xx status +// Do executes the "monitoring.projects.metricDescriptors.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. 
Response headers are in either -// *Group.ServerResponse.Header or (if a response was returned at all) +// *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ProjectsGroupsGetCall) Do(opts ...googleapi.CallOption) (*Group, error) { +func (c *ProjectsMetricDescriptorsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Group{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6947,109 +11017,62 @@ func (c *ProjectsGroupsGetCall) Do(opts ...googleapi.CallOption) (*Group, error) } return ret, nil // { - // "description": "Gets a single group.", - // "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", - // "httpMethod": "GET", - // "id": "monitoring.projects.groups.get", + // "description": "Deletes a metric descriptor. Only user-created custom metrics (https://cloud.google.com/monitoring/custom-metrics) can be deleted.", + // "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}", + // "httpMethod": "DELETE", + // "id": "monitoring.projects.metricDescriptors.delete", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The group to retrieve. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\n", + // "description": "Required. The metric descriptor on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] An example of [METRIC_ID] is: \"custom.googleapis.com/my_test_metric\".", // "location": "path", - // "pattern": "^projects/[^/]+/groups/[^/]+$", + // "pattern": "^projects/[^/]+/metricDescriptors/.*$", // "required": true, // "type": "string" // } // }, // "path": "v3/{+name}", // "response": { - // "$ref": "Group" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read" + // "https://www.googleapis.com/auth/monitoring" // ] // } } -// method id "monitoring.projects.groups.list": +// method id "monitoring.projects.metricDescriptors.get": -type ProjectsGroupsListCall struct { +type ProjectsMetricDescriptorsGetCall struct { s *Service name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists the existing groups. -func (r *ProjectsGroupsService) List(name string) *ProjectsGroupsListCall { - c := &ProjectsGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// AncestorsOfGroup sets the optional parameter "ancestorsOfGroup": A -// group name. The format -// is: -// projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] -// Returns groups that are ancestors of the specified group. 
The groups -// are returned in order, starting with the immediate parent and ending -// with the most distant ancestor. If the specified group has no -// immediate parent, the results are empty. -func (c *ProjectsGroupsListCall) AncestorsOfGroup(ancestorsOfGroup string) *ProjectsGroupsListCall { - c.urlParams_.Set("ancestorsOfGroup", ancestorsOfGroup) - return c -} - -// ChildrenOfGroup sets the optional parameter "childrenOfGroup": A -// group name. The format -// is: -// projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] -// Returns groups whose parent_name field contains the group name. If no -// groups have this parent, the results are empty. -func (c *ProjectsGroupsListCall) ChildrenOfGroup(childrenOfGroup string) *ProjectsGroupsListCall { - c.urlParams_.Set("childrenOfGroup", childrenOfGroup) - return c -} - -// DescendantsOfGroup sets the optional parameter "descendantsOfGroup": -// A group name. The format -// is: -// projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID] -// Returns the descendants of the specified group. This is a superset of -// the results returned by the children_of_group filter, and includes -// children-of-children, and so forth. -func (c *ProjectsGroupsListCall) DescendantsOfGroup(descendantsOfGroup string) *ProjectsGroupsListCall { - c.urlParams_.Set("descendantsOfGroup", descendantsOfGroup) - return c -} - -// PageSize sets the optional parameter "pageSize": A positive number -// that is the maximum number of results to return. -func (c *ProjectsGroupsListCall) PageSize(pageSize int64) *ProjectsGroupsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": If this field is -// not empty then it must contain the next_page_token value returned by -// a previous call to this method. Using this field causes the method to -// return additional results from the previous method call. -func (c *ProjectsGroupsListCall) PageToken(pageToken string) *ProjectsGroupsListCall { - c.urlParams_.Set("pageToken", pageToken) +// Get: Gets a single metric descriptor. +// +// - name: The metric descriptor on which to execute the request. The +// format is: +// projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] An +// example value of [METRIC_ID] is +// "compute.googleapis.com/instance/disk/read_bytes_count". +func (r *ProjectsMetricDescriptorsService) Get(name string) *ProjectsMetricDescriptorsGetCall { + c := &ProjectsMetricDescriptorsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGroupsListCall) Fields(s ...googleapi.Field) *ProjectsGroupsListCall { +func (c *ProjectsMetricDescriptorsGetCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7059,7 +11082,7 @@ func (c *ProjectsGroupsListCall) Fields(s ...googleapi.Field) *ProjectsGroupsLis // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
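// Editor's note: a minimal usage sketch, not part of the vendored diff, showing the
// conditional-read pattern the IfNoneMatch comment above describes. It assumes the
// generated Cloud Monitoring v3 client (google.golang.org/api/monitoring/v3) and a
// previously captured ETag; the helper name and parameters are illustrative only.
package examples

import (
	"context"

	"google.golang.org/api/googleapi"
	monitoring "google.golang.org/api/monitoring/v3"
)

// getIfChanged re-fetches a metric descriptor only if it changed since the last
// read; a matching ETag makes the server reply 304 Not Modified.
func getIfChanged(ctx context.Context, svc *monitoring.Service, name, etag string) (*monitoring.MetricDescriptor, error) {
	md, err := svc.Projects.MetricDescriptors.Get(name).
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // the cached copy is still current
	}
	return md, err
}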
-func (c *ProjectsGroupsListCall) IfNoneMatch(entityTag string) *ProjectsGroupsListCall { +func (c *ProjectsMetricDescriptorsGetCall) IfNoneMatch(entityTag string) *ProjectsMetricDescriptorsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -7067,23 +11090,23 @@ func (c *ProjectsGroupsListCall) IfNoneMatch(entityTag string) *ProjectsGroupsLi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGroupsListCall) Context(ctx context.Context) *ProjectsGroupsListCall { +func (c *ProjectsMetricDescriptorsGetCall) Context(ctx context.Context) *ProjectsMetricDescriptorsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGroupsListCall) Header() http.Header { +func (c *ProjectsMetricDescriptorsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsMetricDescriptorsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -7094,7 +11117,7 @@ func (c *ProjectsGroupsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/groups") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -7107,33 +11130,33 @@ func (c *ProjectsGroupsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.groups.list" call. -// Exactly one of *ListGroupsResponse or error will be non-nil. Any +// Do executes the "monitoring.projects.metricDescriptors.get" call. +// Exactly one of *MetricDescriptor or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ListGroupsResponse.ServerResponse.Header or (if a response was +// *MetricDescriptor.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ProjectsGroupsListCall) Do(opts ...googleapi.CallOption) (*ListGroupsResponse, error) { +func (c *ProjectsMetricDescriptorsGetCall) Do(opts ...googleapi.CallOption) (*MetricDescriptor, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ListGroupsResponse{ + ret := &MetricDescriptor{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7145,152 +11168,139 @@ func (c *ProjectsGroupsListCall) Do(opts ...googleapi.CallOption) (*ListGroupsRe } return ret, nil // { - // "description": "Lists the existing groups.", - // "flatPath": "v3/projects/{projectsId}/groups", + // "description": "Gets a single metric descriptor.", + // "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}", // "httpMethod": "GET", - // "id": "monitoring.projects.groups.list", + // "id": "monitoring.projects.metricDescriptors.get", // "parameterOrder": [ // "name" // ], // "parameters": { - // "ancestorsOfGroup": { - // "description": "A group name. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\nReturns groups that are ancestors of the specified group. The groups are returned in order, starting with the immediate parent and ending with the most distant ancestor. If the specified group has no immediate parent, the results are empty.", - // "location": "query", - // "type": "string" - // }, - // "childrenOfGroup": { - // "description": "A group name. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\nReturns groups whose parent_name field contains the group name. If no groups have this parent, the results are empty.", - // "location": "query", - // "type": "string" - // }, - // "descendantsOfGroup": { - // "description": "A group name. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\nReturns the descendants of the specified group. This is a superset of the results returned by the children_of_group filter, and includes children-of-children, and so forth.", - // "location": "query", - // "type": "string" - // }, // "name": { - // "description": "Required. The project whose groups are to be listed. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + // "description": "Required. The metric descriptor on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID] An example value of [METRIC_ID] is \"compute.googleapis.com/instance/disk/read_bytes_count\".", // "location": "path", - // "pattern": "^projects/[^/]+$", + // "pattern": "^projects/[^/]+/metricDescriptors/.*$", // "required": true, // "type": "string" - // }, - // "pageSize": { - // "description": "A positive number that is the maximum number of results to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "If this field is not empty then it must contain the next_page_token value returned by a previous call to this method. 
Using this field causes the method to return additional results from the previous method call.", - // "location": "query", - // "type": "string" // } // }, - // "path": "v3/{+name}/groups", + // "path": "v3/{+name}", // "response": { - // "$ref": "ListGroupsResponse" + // "$ref": "MetricDescriptor" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read" + // "https://www.googleapis.com/auth/monitoring.read", + // "https://www.googleapis.com/auth/monitoring.write" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsGroupsListCall) Pages(ctx context.Context, f func(*ListGroupsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } +// method id "monitoring.projects.metricDescriptors.list": + +type ProjectsMetricDescriptorsListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// method id "monitoring.projects.groups.update": +// List: Lists metric descriptors that match a filter. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) on which +// to execute the request. The format is: +// projects/[PROJECT_ID_OR_NUMBER]. +func (r *ProjectsMetricDescriptorsService) List(name string) *ProjectsMetricDescriptorsListCall { + c := &ProjectsMetricDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} -type ProjectsGroupsUpdateCall struct { - s *Service - name string - group *Group - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Filter sets the optional parameter "filter": If this field is empty, +// all custom and system-defined metric descriptors are returned. +// Otherwise, the filter +// (https://cloud.google.com/monitoring/api/v3/filters) specifies which +// metric descriptors are to be returned. For example, the following +// filter matches all custom metrics +// (https://cloud.google.com/monitoring/custom-metrics): metric.type = +// starts_with("custom.googleapis.com/") +func (c *ProjectsMetricDescriptorsListCall) Filter(filter string) *ProjectsMetricDescriptorsListCall { + c.urlParams_.Set("filter", filter) + return c } -// Update: Updates an existing group. You can change any group -// attributes except name. -func (r *ProjectsGroupsService) Update(name string, group *Group) *ProjectsGroupsUpdateCall { - c := &ProjectsGroupsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.group = group +// PageSize sets the optional parameter "pageSize": A positive number +// that is the maximum number of results to return. +func (c *ProjectsMetricDescriptorsListCall) PageSize(pageSize int64) *ProjectsMetricDescriptorsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } -// ValidateOnly sets the optional parameter "validateOnly": If true, -// validate this request but do not update the existing group. 
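// Editor's note: an illustrative sketch, not part of the diff, combining the new
// metricDescriptors List call with the filter from its doc comment and the
// generated Pages helper. The project name is a placeholder.
package examples

import (
	"context"
	"fmt"

	monitoring "google.golang.org/api/monitoring/v3"
)

// listCustomMetrics prints the type of every user-defined metric descriptor.
func listCustomMetrics(ctx context.Context, svc *monitoring.Service) error {
	call := svc.Projects.MetricDescriptors.
		List("projects/my-project").
		Filter(`metric.type = starts_with("custom.googleapis.com/")`).
		PageSize(100)
	// Pages follows nextPageToken for us; returning an error from f stops early.
	return call.Pages(ctx, func(page *monitoring.ListMetricDescriptorsResponse) error {
		for _, md := range page.MetricDescriptors {
			fmt.Println(md.Type)
		}
		return nil
	})
}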
-func (c *ProjectsGroupsUpdateCall) ValidateOnly(validateOnly bool) *ProjectsGroupsUpdateCall { - c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) +// PageToken sets the optional parameter "pageToken": If this field is +// not empty then it must contain the nextPageToken value returned by a +// previous call to this method. Using this field causes the method to +// return additional results from the previous method call. +func (c *ProjectsMetricDescriptorsListCall) PageToken(pageToken string) *ProjectsMetricDescriptorsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGroupsUpdateCall) Fields(s ...googleapi.Field) *ProjectsGroupsUpdateCall { +func (c *ProjectsMetricDescriptorsListCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsMetricDescriptorsListCall) IfNoneMatch(entityTag string) *ProjectsMetricDescriptorsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGroupsUpdateCall) Context(ctx context.Context) *ProjectsGroupsUpdateCall { +func (c *ProjectsMetricDescriptorsListCall) Context(ctx context.Context) *ProjectsMetricDescriptorsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGroupsUpdateCall) Header() http.Header { +func (c *ProjectsMetricDescriptorsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGroupsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsMetricDescriptorsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.group) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/metricDescriptors") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -7301,33 +11311,33 @@ func (c *ProjectsGroupsUpdateCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.groups.update" call. -// Exactly one of *Group or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Group.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGroupsUpdateCall) Do(opts ...googleapi.CallOption) (*Group, error) { +// Do executes the "monitoring.projects.metricDescriptors.list" call. +// Exactly one of *ListMetricDescriptorsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ListMetricDescriptorsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsMetricDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListMetricDescriptorsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Group{ + ret := &ListMetricDescriptorsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7339,45 +11349,76 @@ func (c *ProjectsGroupsUpdateCall) Do(opts ...googleapi.CallOption) (*Group, err } return ret, nil // { - // "description": "Updates an existing group. You can change any group attributes except name.", - // "flatPath": "v3/projects/{projectsId}/groups/{groupsId}", - // "httpMethod": "PUT", - // "id": "monitoring.projects.groups.update", + // "description": "Lists metric descriptors that match a filter.", + // "flatPath": "v3/projects/{projectsId}/metricDescriptors", + // "httpMethod": "GET", + // "id": "monitoring.projects.metricDescriptors.list", // "parameterOrder": [ // "name" // ], // "parameters": { + // "filter": { + // "description": "If this field is empty, all custom and system-defined metric descriptors are returned. Otherwise, the filter (https://cloud.google.com/monitoring/api/v3/filters) specifies which metric descriptors are to be returned. For example, the following filter matches all custom metrics (https://cloud.google.com/monitoring/custom-metrics): metric.type = starts_with(\"custom.googleapis.com/\") ", + // "location": "query", + // "type": "string" + // }, // "name": { - // "description": "Output only. The name of this group. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\nWhen creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique [GROUP_ID] that is generated automatically.", + // "description": "Required. 
The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] ", // "location": "path", - // "pattern": "^projects/[^/]+/groups/[^/]+$", + // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // }, - // "validateOnly": { - // "description": "If true, validate this request but do not update the existing group.", + // "pageSize": { + // "description": "A positive number that is the maximum number of results to return.", + // "format": "int32", // "location": "query", - // "type": "boolean" + // "type": "integer" + // }, + // "pageToken": { + // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", + // "location": "query", + // "type": "string" // } // }, - // "path": "v3/{+name}", - // "request": { - // "$ref": "Group" - // }, + // "path": "v3/{+name}/metricDescriptors", // "response": { - // "$ref": "Group" + // "$ref": "ListMetricDescriptorsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" + // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.read", + // "https://www.googleapis.com/auth/monitoring.write" // ] // } } -// method id "monitoring.projects.groups.members.list": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsMetricDescriptorsListCall) Pages(ctx context.Context, f func(*ListMetricDescriptorsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsGroupsMembersListCall struct { +// method id "monitoring.projects.monitoredResourceDescriptors.get": + +type ProjectsMonitoredResourceDescriptorsGetCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -7386,61 +11427,22 @@ type ProjectsGroupsMembersListCall struct { header_ http.Header } -// List: Lists the monitored resources that are members of a group. -func (r *ProjectsGroupsMembersService) List(name string) *ProjectsGroupsMembersListCall { - c := &ProjectsGroupsMembersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Gets a single monitored resource descriptor. +// +// - name: The monitored resource descriptor to get. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOUR +// CE_TYPE] The [RESOURCE_TYPE] is a predefined type, such as +// cloudsql_database. +func (r *ProjectsMonitoredResourceDescriptorsService) Get(name string) *ProjectsMonitoredResourceDescriptorsGetCall { + c := &ProjectsMonitoredResourceDescriptorsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } -// Filter sets the optional parameter "filter": An optional list filter -// (https://cloud.google.com/monitoring/api/learn_more#filtering) -// describing the members to be returned. The filter may reference the -// type, labels, and metadata of monitored resources that comprise the -// group. 
For example, to return only resources representing Compute -// Engine VM instances, use this filter: -// `resource.type = "gce_instance" -func (c *ProjectsGroupsMembersListCall) Filter(filter string) *ProjectsGroupsMembersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// IntervalEndTime sets the optional parameter "interval.endTime": -// Required. The end of the time interval. -func (c *ProjectsGroupsMembersListCall) IntervalEndTime(intervalEndTime string) *ProjectsGroupsMembersListCall { - c.urlParams_.Set("interval.endTime", intervalEndTime) - return c -} - -// IntervalStartTime sets the optional parameter "interval.startTime": -// The beginning of the time interval. The default value for the start -// time is the end time. The start time must not be later than the end -// time. -func (c *ProjectsGroupsMembersListCall) IntervalStartTime(intervalStartTime string) *ProjectsGroupsMembersListCall { - c.urlParams_.Set("interval.startTime", intervalStartTime) - return c -} - -// PageSize sets the optional parameter "pageSize": A positive number -// that is the maximum number of results to return. -func (c *ProjectsGroupsMembersListCall) PageSize(pageSize int64) *ProjectsGroupsMembersListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If this field is -// not empty then it must contain the next_page_token value returned by -// a previous call to this method. Using this field causes the method to -// return additional results from the previous method call. -func (c *ProjectsGroupsMembersListCall) PageToken(pageToken string) *ProjectsGroupsMembersListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGroupsMembersListCall) Fields(s ...googleapi.Field) *ProjectsGroupsMembersListCall { +func (c *ProjectsMonitoredResourceDescriptorsGetCall) Fields(s ...googleapi.Field) *ProjectsMonitoredResourceDescriptorsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7450,7 +11452,7 @@ func (c *ProjectsGroupsMembersListCall) Fields(s ...googleapi.Field) *ProjectsGr // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsGroupsMembersListCall) IfNoneMatch(entityTag string) *ProjectsGroupsMembersListCall { +func (c *ProjectsMonitoredResourceDescriptorsGetCall) IfNoneMatch(entityTag string) *ProjectsMonitoredResourceDescriptorsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -7458,23 +11460,23 @@ func (c *ProjectsGroupsMembersListCall) IfNoneMatch(entityTag string) *ProjectsG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGroupsMembersListCall) Context(ctx context.Context) *ProjectsGroupsMembersListCall { +func (c *ProjectsMonitoredResourceDescriptorsGetCall) Context(ctx context.Context) *ProjectsMonitoredResourceDescriptorsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
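// Editor's note: illustrative only, not part of the diff. Fetches one monitored
// resource descriptor through the Get call added above; the "gce_instance"
// resource type is an assumed example, like the cloudsql_database one in the
// doc comment.
package examples

import (
	"context"
	"fmt"

	monitoring "google.golang.org/api/monitoring/v3"
)

func describeResourceType(ctx context.Context, svc *monitoring.Service, project string) error {
	name := "projects/" + project + "/monitoredResourceDescriptors/gce_instance"
	mrd, err := svc.Projects.MonitoredResourceDescriptors.Get(name).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("%s: %s (%d labels)\n", mrd.Type, mrd.DisplayName, len(mrd.Labels))
	return nil
}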
-func (c *ProjectsGroupsMembersListCall) Header() http.Header { +func (c *ProjectsMonitoredResourceDescriptorsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGroupsMembersListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsMonitoredResourceDescriptorsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -7485,7 +11487,7 @@ func (c *ProjectsGroupsMembersListCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/members") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -7498,33 +11500,33 @@ func (c *ProjectsGroupsMembersListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.groups.members.list" call. -// Exactly one of *ListGroupMembersResponse or error will be non-nil. +// Do executes the "monitoring.projects.monitoredResourceDescriptors.get" call. +// Exactly one of *MonitoredResourceDescriptor or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *ListGroupMembersResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// *MonitoredResourceDescriptor.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ProjectsGroupsMembersListCall) Do(opts ...googleapi.CallOption) (*ListGroupMembersResponse, error) { +func (c *ProjectsMonitoredResourceDescriptorsGetCall) Do(opts ...googleapi.CallOption) (*MonitoredResourceDescriptor, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ListGroupMembersResponse{ + ret := &MonitoredResourceDescriptor{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7536,148 +11538,137 @@ func (c *ProjectsGroupsMembersListCall) Do(opts ...googleapi.CallOption) (*ListG } return ret, nil // { - // "description": "Lists the monitored resources that are members of a group.", - // "flatPath": "v3/projects/{projectsId}/groups/{groupsId}/members", + // "description": "Gets a single monitored resource descriptor.", + // "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors/{monitoredResourceDescriptorsId}", // "httpMethod": "GET", - // "id": "monitoring.projects.groups.members.list", + // "id": "monitoring.projects.monitoredResourceDescriptors.get", // "parameterOrder": [ // "name" // ], // "parameters": { - // "filter": { - // "description": "An optional list filter (https://cloud.google.com/monitoring/api/learn_more#filtering) describing the members to be returned. The filter may reference the type, labels, and metadata of monitored resources that comprise the group. For example, to return only resources representing Compute Engine VM instances, use this filter:\n`resource.type = \"gce_instance\"`\n", - // "location": "query", - // "type": "string" - // }, - // "interval.endTime": { - // "description": "Required. The end of the time interval.", - // "format": "google-datetime", - // "location": "query", - // "type": "string" - // }, - // "interval.startTime": { - // "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.", - // "format": "google-datetime", - // "location": "query", - // "type": "string" - // }, // "name": { - // "description": "Required. The group whose members are listed. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]\n", + // "description": "Required. The monitored resource descriptor to get. The format is: projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE] The [RESOURCE_TYPE] is a predefined type, such as cloudsql_database.", // "location": "path", - // "pattern": "^projects/[^/]+/groups/[^/]+$", + // "pattern": "^projects/[^/]+/monitoredResourceDescriptors/.*$", // "required": true, // "type": "string" - // }, - // "pageSize": { - // "description": "A positive number that is the maximum number of results to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "If this field is not empty then it must contain the next_page_token value returned by a previous call to this method. 
Using this field causes the method to return additional results from the previous method call.", - // "location": "query", - // "type": "string" // } // }, - // "path": "v3/{+name}/members", + // "path": "v3/{+name}", // "response": { - // "$ref": "ListGroupMembersResponse" + // "$ref": "MonitoredResourceDescriptor" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read" + // "https://www.googleapis.com/auth/monitoring.read", + // "https://www.googleapis.com/auth/monitoring.write" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsGroupsMembersListCall) Pages(ctx context.Context, f func(*ListGroupMembersResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } +// method id "monitoring.projects.monitoredResourceDescriptors.list": + +type ProjectsMonitoredResourceDescriptorsListCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// method id "monitoring.projects.metricDescriptors.create": +// List: Lists monitored resource descriptors that match a filter. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) on which +// to execute the request. The format is: +// projects/[PROJECT_ID_OR_NUMBER]. +func (r *ProjectsMonitoredResourceDescriptorsService) List(name string) *ProjectsMonitoredResourceDescriptorsListCall { + c := &ProjectsMonitoredResourceDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} -type ProjectsMetricDescriptorsCreateCall struct { - s *Service - name string - metricdescriptor *MetricDescriptor - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Filter sets the optional parameter "filter": An optional filter +// (https://cloud.google.com/monitoring/api/v3/filters) describing the +// descriptors to be returned. The filter can reference the descriptor's +// type and labels. For example, the following filter returns only +// Google Compute Engine descriptors that have an id label: +// resource.type = starts_with("gce_") AND resource.label:id +func (c *ProjectsMonitoredResourceDescriptorsListCall) Filter(filter string) *ProjectsMonitoredResourceDescriptorsListCall { + c.urlParams_.Set("filter", filter) + return c } -// Create: Creates a new metric descriptor. User-created metric -// descriptors define custom metrics -// (https://cloud.google.com/monitoring/custom-metrics). -func (r *ProjectsMetricDescriptorsService) Create(name string, metricdescriptor *MetricDescriptor) *ProjectsMetricDescriptorsCreateCall { - c := &ProjectsMetricDescriptorsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.metricdescriptor = metricdescriptor +// PageSize sets the optional parameter "pageSize": A positive number +// that is the maximum number of results to return. 
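// Editor's note: a hedged sketch, not part of the diff, of the list-with-filter
// pattern documented above; the filter expression is taken from the doc comment,
// the project name is a placeholder.
package examples

import (
	"context"
	"fmt"

	monitoring "google.golang.org/api/monitoring/v3"
)

// listGCEDescriptors walks every Compute Engine descriptor that has an id label.
func listGCEDescriptors(ctx context.Context, svc *monitoring.Service) error {
	return svc.Projects.MonitoredResourceDescriptors.
		List("projects/my-project").
		Filter(`resource.type = starts_with("gce_") AND resource.label:id`).
		Pages(ctx, func(page *monitoring.ListMonitoredResourceDescriptorsResponse) error {
			for _, d := range page.ResourceDescriptors {
				fmt.Println(d.Type)
			}
			return nil
		})
}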
+func (c *ProjectsMonitoredResourceDescriptorsListCall) PageSize(pageSize int64) *ProjectsMonitoredResourceDescriptorsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If this field is +// not empty then it must contain the nextPageToken value returned by a +// previous call to this method. Using this field causes the method to +// return additional results from the previous method call. +func (c *ProjectsMonitoredResourceDescriptorsListCall) PageToken(pageToken string) *ProjectsMonitoredResourceDescriptorsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMetricDescriptorsCreateCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsCreateCall { +func (c *ProjectsMonitoredResourceDescriptorsListCall) Fields(s ...googleapi.Field) *ProjectsMonitoredResourceDescriptorsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsMonitoredResourceDescriptorsListCall) IfNoneMatch(entityTag string) *ProjectsMonitoredResourceDescriptorsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMetricDescriptorsCreateCall) Context(ctx context.Context) *ProjectsMetricDescriptorsCreateCall { +func (c *ProjectsMonitoredResourceDescriptorsListCall) Context(ctx context.Context) *ProjectsMonitoredResourceDescriptorsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsMetricDescriptorsCreateCall) Header() http.Header { +func (c *ProjectsMonitoredResourceDescriptorsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMetricDescriptorsCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsMonitoredResourceDescriptorsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metricdescriptor) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/metricDescriptors") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/monitoredResourceDescriptors") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -7688,33 +11679,35 @@ func (c *ProjectsMetricDescriptorsCreateCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.metricDescriptors.create" call. -// Exactly one of *MetricDescriptor or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *MetricDescriptor.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsMetricDescriptorsCreateCall) Do(opts ...googleapi.CallOption) (*MetricDescriptor, error) { +// Do executes the "monitoring.projects.monitoredResourceDescriptors.list" call. +// Exactly one of *ListMonitoredResourceDescriptorsResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *ListMonitoredResourceDescriptorsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsMonitoredResourceDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &MetricDescriptor{ + ret := &ListMonitoredResourceDescriptorsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7726,52 +11719,94 @@ func (c *ProjectsMetricDescriptorsCreateCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Creates a new metric descriptor. User-created metric descriptors define custom metrics (https://cloud.google.com/monitoring/custom-metrics).", - // "flatPath": "v3/projects/{projectsId}/metricDescriptors", - // "httpMethod": "POST", - // "id": "monitoring.projects.metricDescriptors.create", + // "description": "Lists monitored resource descriptors that match a filter.", + // "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors", + // "httpMethod": "GET", + // "id": "monitoring.projects.monitoredResourceDescriptors.list", // "parameterOrder": [ // "name" // ], // "parameters": { + // "filter": { + // "description": "An optional filter (https://cloud.google.com/monitoring/api/v3/filters) describing the descriptors to be returned. The filter can reference the descriptor's type and labels. For example, the following filter returns only Google Compute Engine descriptors that have an id label: resource.type = starts_with(\"gce_\") AND resource.label:id ", + // "location": "query", + // "type": "string" + // }, // "name": { - // "description": "Required. The project on which to execute the request. 
The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] ", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" + // }, + // "pageSize": { + // "description": "A positive number that is the maximum number of results to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", + // "location": "query", + // "type": "string" // } // }, - // "path": "v3/{+name}/metricDescriptors", - // "request": { - // "$ref": "MetricDescriptor" - // }, + // "path": "v3/{+name}/monitoredResourceDescriptors", // "response": { - // "$ref": "MetricDescriptor" + // "$ref": "ListMonitoredResourceDescriptorsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.read", // "https://www.googleapis.com/auth/monitoring.write" // ] // } } -// method id "monitoring.projects.metricDescriptors.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsMonitoredResourceDescriptorsListCall) Pages(ctx context.Context, f func(*ListMonitoredResourceDescriptorsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsMetricDescriptorsDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "monitoring.projects.notificationChannelDescriptors.get": + +type ProjectsNotificationChannelDescriptorsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes a metric descriptor. Only user-created custom metrics -// (https://cloud.google.com/monitoring/custom-metrics) can be deleted. -func (r *ProjectsMetricDescriptorsService) Delete(name string) *ProjectsMetricDescriptorsDeleteCall { - c := &ProjectsMetricDescriptorsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Gets a single channel descriptor. The descriptor indicates which +// fields are expected / permitted for a notification channel of the +// given type. +// +// - name: The channel type for which to execute the request. The format +// is: +// projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHAN +// NEL_TYPE]. +func (r *ProjectsNotificationChannelDescriptorsService) Get(name string) *ProjectsNotificationChannelDescriptorsGetCall { + c := &ProjectsNotificationChannelDescriptorsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } @@ -7779,41 +11814,54 @@ func (r *ProjectsMetricDescriptorsService) Delete(name string) *ProjectsMetricDe // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMetricDescriptorsDeleteCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsDeleteCall { +func (c *ProjectsNotificationChannelDescriptorsGetCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelDescriptorsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsNotificationChannelDescriptorsGetCall) IfNoneMatch(entityTag string) *ProjectsNotificationChannelDescriptorsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMetricDescriptorsDeleteCall) Context(ctx context.Context) *ProjectsMetricDescriptorsDeleteCall { +func (c *ProjectsNotificationChannelDescriptorsGetCall) Context(ctx context.Context) *ProjectsNotificationChannelDescriptorsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsMetricDescriptorsDeleteCall) Header() http.Header { +func (c *ProjectsNotificationChannelDescriptorsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMetricDescriptorsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsNotificationChannelDescriptorsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -7824,33 +11872,33 @@ func (c *ProjectsMetricDescriptorsDeleteCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.metricDescriptors.delete" call. -// Exactly one of *Empty or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsMetricDescriptorsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +// Do executes the "monitoring.projects.notificationChannelDescriptors.get" call. +// Exactly one of *NotificationChannelDescriptor or error will be +// non-nil. Any non-2xx status code is an error. 
Response headers are in +// either *NotificationChannelDescriptor.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsNotificationChannelDescriptorsGetCall) Do(opts ...googleapi.CallOption) (*NotificationChannelDescriptor, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Empty{ + ret := &NotificationChannelDescriptor{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7862,37 +11910,38 @@ func (c *ProjectsMetricDescriptorsDeleteCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Deletes a metric descriptor. Only user-created custom metrics (https://cloud.google.com/monitoring/custom-metrics) can be deleted.", - // "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}", - // "httpMethod": "DELETE", - // "id": "monitoring.projects.metricDescriptors.delete", + // "description": "Gets a single channel descriptor. The descriptor indicates which fields are expected / permitted for a notification channel of the given type.", + // "flatPath": "v3/projects/{projectsId}/notificationChannelDescriptors/{notificationChannelDescriptorsId}", + // "httpMethod": "GET", + // "id": "monitoring.projects.notificationChannelDescriptors.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The metric descriptor on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID]\nAn example of [METRIC_ID] is: \"custom.googleapis.com/my_test_metric\".", + // "description": "Required. The channel type for which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE] ", // "location": "path", - // "pattern": "^projects/[^/]+/metricDescriptors/.*$", + // "pattern": "^projects/[^/]+/notificationChannelDescriptors/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v3/{+name}", // "response": { - // "$ref": "Empty" + // "$ref": "NotificationChannelDescriptor" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" + // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.read" // ] // } } -// method id "monitoring.projects.metricDescriptors.get": +// method id "monitoring.projects.notificationChannelDescriptors.list": -type ProjectsMetricDescriptorsGetCall struct { +type ProjectsNotificationChannelDescriptorsListCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -7901,18 +11950,43 @@ type ProjectsMetricDescriptorsGetCall struct { header_ http.Header } -// Get: Gets a single metric descriptor. This method does not require a -// Workspace. 
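// Editor's note: an illustrative sketch, not part of the diff, of the channel
// descriptor Get call above. The "email" channel type is assumed for the example
// and is not taken from this patch.
package examples

import (
	"context"
	"fmt"

	monitoring "google.golang.org/api/monitoring/v3"
)

// describeEmailChannel lists the labels a channel of this type expects.
func describeEmailChannel(ctx context.Context, svc *monitoring.Service, project string) error {
	name := "projects/" + project + "/notificationChannelDescriptors/email"
	d, err := svc.Projects.NotificationChannelDescriptors.Get(name).Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, l := range d.Labels {
		fmt.Printf("expected label: %s\n", l.Key)
	}
	return nil
}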
-func (r *ProjectsMetricDescriptorsService) Get(name string) *ProjectsMetricDescriptorsGetCall { - c := &ProjectsMetricDescriptorsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists the descriptors for supported channel types. The use of +// descriptors makes it possible for new channel types to be dynamically +// added. +// +// - name: The REST resource name of the parent from which to retrieve +// the notification channel descriptors. The expected syntax is: +// projects/[PROJECT_ID_OR_NUMBER] Note that this names +// (https://cloud.google.com/monitoring/api/v3#project_name) the +// parent container in which to look for the descriptors; to retrieve +// a single descriptor by name, use the +// GetNotificationChannelDescriptor operation, instead. +func (r *ProjectsNotificationChannelDescriptorsService) List(name string) *ProjectsNotificationChannelDescriptorsListCall { + c := &ProjectsNotificationChannelDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } +// PageSize sets the optional parameter "pageSize": The maximum number +// of results to return in a single response. If not set to a positive +// number, a reasonable value will be chosen by the service. +func (c *ProjectsNotificationChannelDescriptorsListCall) PageSize(pageSize int64) *ProjectsNotificationChannelDescriptorsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": If non-empty, +// page_token must contain a value returned as the next_page_token in a +// previous response to request the next set of results. +func (c *ProjectsNotificationChannelDescriptorsListCall) PageToken(pageToken string) *ProjectsNotificationChannelDescriptorsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMetricDescriptorsGetCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsGetCall { +func (c *ProjectsNotificationChannelDescriptorsListCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelDescriptorsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7922,7 +11996,7 @@ func (c *ProjectsMetricDescriptorsGetCall) Fields(s ...googleapi.Field) *Project // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsMetricDescriptorsGetCall) IfNoneMatch(entityTag string) *ProjectsMetricDescriptorsGetCall { +func (c *ProjectsNotificationChannelDescriptorsListCall) IfNoneMatch(entityTag string) *ProjectsNotificationChannelDescriptorsListCall { c.ifNoneMatch_ = entityTag return c } @@ -7930,23 +12004,23 @@ func (c *ProjectsMetricDescriptorsGetCall) IfNoneMatch(entityTag string) *Projec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMetricDescriptorsGetCall) Context(ctx context.Context) *ProjectsMetricDescriptorsGetCall { +func (c *ProjectsNotificationChannelDescriptorsListCall) Context(ctx context.Context) *ProjectsNotificationChannelDescriptorsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsMetricDescriptorsGetCall) Header() http.Header { +func (c *ProjectsNotificationChannelDescriptorsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMetricDescriptorsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsNotificationChannelDescriptorsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -7957,7 +12031,7 @@ func (c *ProjectsMetricDescriptorsGetCall) doRequest(alt string) (*http.Response var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/notificationChannelDescriptors") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -7970,33 +12044,35 @@ func (c *ProjectsMetricDescriptorsGetCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.metricDescriptors.get" call. -// Exactly one of *MetricDescriptor or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *MetricDescriptor.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsMetricDescriptorsGetCall) Do(opts ...googleapi.CallOption) (*MetricDescriptor, error) { +// Do executes the "monitoring.projects.notificationChannelDescriptors.list" call. +// Exactly one of *ListNotificationChannelDescriptorsResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *ListNotificationChannelDescriptorsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsNotificationChannelDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListNotificationChannelDescriptorsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &MetricDescriptor{ + ret := &ListNotificationChannelDescriptorsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8008,135 +12084,143 @@ func (c *ProjectsMetricDescriptorsGetCall) Do(opts ...googleapi.CallOption) (*Me } return ret, nil // { - // "description": "Gets a single metric descriptor. 
This method does not require a Workspace.", - // "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}", + // "description": "Lists the descriptors for supported channel types. The use of descriptors makes it possible for new channel types to be dynamically added.", + // "flatPath": "v3/projects/{projectsId}/notificationChannelDescriptors", // "httpMethod": "GET", - // "id": "monitoring.projects.metricDescriptors.get", + // "id": "monitoring.projects.notificationChannelDescriptors.list", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The metric descriptor on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID]\nAn example value of [METRIC_ID] is \"compute.googleapis.com/instance/disk/read_bytes_count\".", + // "description": "Required. The REST resource name of the parent from which to retrieve the notification channel descriptors. The expected syntax is: projects/[PROJECT_ID_OR_NUMBER] Note that this names (https://cloud.google.com/monitoring/api/v3#project_name) the parent container in which to look for the descriptors; to retrieve a single descriptor by name, use the GetNotificationChannelDescriptor operation, instead.", // "location": "path", - // "pattern": "^projects/[^/]+/metricDescriptors/.*$", + // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" + // }, + // "pageSize": { + // "description": "The maximum number of results to return in a single response. If not set to a positive number, a reasonable value will be chosen by the service.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "If non-empty, page_token must contain a value returned as the next_page_token in a previous response to request the next set of results.", + // "location": "query", + // "type": "string" // } // }, - // "path": "v3/{+name}", + // "path": "v3/{+name}/notificationChannelDescriptors", // "response": { - // "$ref": "MetricDescriptor" + // "$ref": "ListNotificationChannelDescriptorsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read", - // "https://www.googleapis.com/auth/monitoring.write" + // "https://www.googleapis.com/auth/monitoring.read" // ] // } } -// method id "monitoring.projects.metricDescriptors.list": - -type ProjectsMetricDescriptorsListCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists metric descriptors that match a filter. This method does -// not require a Workspace. -func (r *ProjectsMetricDescriptorsService) List(name string) *ProjectsMetricDescriptorsListCall { - c := &ProjectsMetricDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
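The Pages helper defined just below drives the pageToken/nextPageToken loop for the caller, resetting the token when iteration finishes; a usage sketch, with svc and ctx wired as in the earlier sketch and ChannelDescriptors assumed as the response's list field:

// svc and ctx as in the first sketch above.
err := svc.Projects.NotificationChannelDescriptors.
	List("projects/my-project").
	PageSize(100).
	Pages(ctx, func(page *monitoring.ListNotificationChannelDescriptorsResponse) error {
		for _, d := range page.ChannelDescriptors {
			fmt.Println(d.Type, d.DisplayName)
		}
		return nil // returning a non-nil error halts the iteration
	})
if err != nil {
	log.Fatal(err)
}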
+func (c *ProjectsNotificationChannelDescriptorsListCall) Pages(ctx context.Context, f func(*ListNotificationChannelDescriptorsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// Filter sets the optional parameter "filter": If this field is empty, -// all custom and system-defined metric descriptors are returned. -// Otherwise, the filter -// (https://cloud.google.com/monitoring/api/v3/filters) specifies which -// metric descriptors are to be returned. For example, the following -// filter matches all custom metrics -// (https://cloud.google.com/monitoring/custom-metrics): -// metric.type = starts_with("custom.googleapis.com/") -func (c *ProjectsMetricDescriptorsListCall) Filter(filter string) *ProjectsMetricDescriptorsListCall { - c.urlParams_.Set("filter", filter) - return c -} +// method id "monitoring.projects.notificationChannels.create": -// PageSize sets the optional parameter "pageSize": A positive number -// that is the maximum number of results to return. -func (c *ProjectsMetricDescriptorsListCall) PageSize(pageSize int64) *ProjectsMetricDescriptorsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c +type ProjectsNotificationChannelsCreateCall struct { + s *Service + name string + notificationchannel *NotificationChannel + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": If this field is -// not empty then it must contain the nextPageToken value returned by a -// previous call to this method. Using this field causes the method to -// return additional results from the previous method call. -func (c *ProjectsMetricDescriptorsListCall) PageToken(pageToken string) *ProjectsMetricDescriptorsListCall { - c.urlParams_.Set("pageToken", pageToken) +// Create: Creates a new notification channel, representing a single +// notification endpoint such as an email address, SMS number, or +// PagerDuty service.Design your application to single-thread API calls +// that modify the state of notification channels in a single project. +// This includes calls to CreateNotificationChannel, +// DeleteNotificationChannel and UpdateNotificationChannel. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) on which +// to execute the request. The format is: +// projects/[PROJECT_ID_OR_NUMBER] This names the container into which +// the channel will be written, this does not name the newly created +// channel. The resulting channel's name will have a normalized +// version of this field as a prefix, but will add +// /notificationChannels/[CHANNEL_ID] to identify the channel. +func (r *ProjectsNotificationChannelsService) Create(name string, notificationchannel *NotificationChannel) *ProjectsNotificationChannelsCreateCall { + c := &ProjectsNotificationChannelsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.notificationchannel = notificationchannel return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ProjectsMetricDescriptorsListCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsListCall { +func (c *ProjectsNotificationChannelsCreateCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsMetricDescriptorsListCall) IfNoneMatch(entityTag string) *ProjectsMetricDescriptorsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMetricDescriptorsListCall) Context(ctx context.Context) *ProjectsMetricDescriptorsListCall { +func (c *ProjectsNotificationChannelsCreateCall) Context(ctx context.Context) *ProjectsNotificationChannelsCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsMetricDescriptorsListCall) Header() http.Header { +func (c *ProjectsNotificationChannelsCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMetricDescriptorsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsNotificationChannelsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.notificationchannel) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/metricDescriptors") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/notificationChannels") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -8147,33 +12231,33 @@ func (c *ProjectsMetricDescriptorsListCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.metricDescriptors.list" call. -// Exactly one of *ListMetricDescriptorsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ListMetricDescriptorsResponse.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "monitoring.projects.notificationChannels.create" call. +// Exactly one of *NotificationChannel or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *NotificationChannel.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ProjectsMetricDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListMetricDescriptorsResponse, error) { +func (c *ProjectsNotificationChannelsCreateCall) Do(opts ...googleapi.CallOption) (*NotificationChannel, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ListMetricDescriptorsResponse{ + ret := &NotificationChannel{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8185,143 +12269,109 @@ func (c *ProjectsMetricDescriptorsListCall) Do(opts ...googleapi.CallOption) (*L } return ret, nil // { - // "description": "Lists metric descriptors that match a filter. This method does not require a Workspace.", - // "flatPath": "v3/projects/{projectsId}/metricDescriptors", - // "httpMethod": "GET", - // "id": "monitoring.projects.metricDescriptors.list", + // "description": "Creates a new notification channel, representing a single notification endpoint such as an email address, SMS number, or PagerDuty service.Design your application to single-thread API calls that modify the state of notification channels in a single project. This includes calls to CreateNotificationChannel, DeleteNotificationChannel and UpdateNotificationChannel.", + // "flatPath": "v3/projects/{projectsId}/notificationChannels", + // "httpMethod": "POST", + // "id": "monitoring.projects.notificationChannels.create", // "parameterOrder": [ // "name" // ], // "parameters": { - // "filter": { - // "description": "If this field is empty, all custom and system-defined metric descriptors are returned. Otherwise, the filter (https://cloud.google.com/monitoring/api/v3/filters) specifies which metric descriptors are to be returned. For example, the following filter matches all custom metrics (https://cloud.google.com/monitoring/custom-metrics):\nmetric.type = starts_with(\"custom.googleapis.com/\")\n", - // "location": "query", - // "type": "string" - // }, // "name": { - // "description": "Required. The project on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] This names the container into which the channel will be written, this does not name the newly created channel. 
The resulting channel's name will have a normalized version of this field as a prefix, but will add /notificationChannels/[CHANNEL_ID] to identify the channel.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" - // }, - // "pageSize": { - // "description": "A positive number that is the maximum number of results to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - // "location": "query", - // "type": "string" // } // }, - // "path": "v3/{+name}/metricDescriptors", + // "path": "v3/{+name}/notificationChannels", + // "request": { + // "$ref": "NotificationChannel" + // }, // "response": { - // "$ref": "ListMetricDescriptorsResponse" + // "$ref": "NotificationChannel" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read", - // "https://www.googleapis.com/auth/monitoring.write" + // "https://www.googleapis.com/auth/monitoring" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsMetricDescriptorsListCall) Pages(ctx context.Context, f func(*ListMetricDescriptorsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "monitoring.projects.monitoredResourceDescriptors.get": +// method id "monitoring.projects.notificationChannels.delete": -type ProjectsMonitoredResourceDescriptorsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsNotificationChannelsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets a single monitored resource descriptor. This method does -// not require a Workspace. -func (r *ProjectsMonitoredResourceDescriptorsService) Get(name string) *ProjectsMonitoredResourceDescriptorsGetCall { - c := &ProjectsMonitoredResourceDescriptorsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes a notification channel.Design your application to +// single-thread API calls that modify the state of notification +// channels in a single project. This includes calls to +// CreateNotificationChannel, DeleteNotificationChannel and +// UpdateNotificationChannel. +// +// - name: The channel for which to execute the request. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]. 
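A sketch of the Delete call that follows, exercising the force parameter described in this hunk (channel name hypothetical; svc and ctx as in the first sketch):

// Force(true) deletes the channel even if alerting policies still
// reference it (the policies are updated to drop the channel);
// with Force(false), such a delete would fail.
_, err := svc.Projects.NotificationChannels.
	Delete("projects/my-project/notificationChannels/12345"). // hypothetical
	Force(true).
	Context(ctx).
	Do()
if err != nil {
	log.Fatal(err)
}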
+func (r *ProjectsNotificationChannelsService) Delete(name string) *ProjectsNotificationChannelsDeleteCall { + c := &ProjectsNotificationChannelsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } +// Force sets the optional parameter "force": If true, the notification +// channel will be deleted regardless of its use in alert policies (the +// policies will be updated to remove the channel). If false, channels +// that are still referenced by an existing alerting policy will fail to +// be deleted in a delete operation. +func (c *ProjectsNotificationChannelsDeleteCall) Force(force bool) *ProjectsNotificationChannelsDeleteCall { + c.urlParams_.Set("force", fmt.Sprint(force)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMonitoredResourceDescriptorsGetCall) Fields(s ...googleapi.Field) *ProjectsMonitoredResourceDescriptorsGetCall { +func (c *ProjectsNotificationChannelsDeleteCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsMonitoredResourceDescriptorsGetCall) IfNoneMatch(entityTag string) *ProjectsMonitoredResourceDescriptorsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMonitoredResourceDescriptorsGetCall) Context(ctx context.Context) *ProjectsMonitoredResourceDescriptorsGetCall { +func (c *ProjectsNotificationChannelsDeleteCall) Context(ctx context.Context) *ProjectsNotificationChannelsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsMonitoredResourceDescriptorsGetCall) Header() http.Header { +func (c *ProjectsNotificationChannelsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMonitoredResourceDescriptorsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsNotificationChannelsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -8332,33 +12382,33 @@ func (c *ProjectsMonitoredResourceDescriptorsGetCall) doRequest(alt string) (*ht return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.monitoredResourceDescriptors.get" call. -// Exactly one of *MonitoredResourceDescriptor or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *MonitoredResourceDescriptor.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsMonitoredResourceDescriptorsGetCall) Do(opts ...googleapi.CallOption) (*MonitoredResourceDescriptor, error) { +// Do executes the "monitoring.projects.notificationChannels.delete" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsNotificationChannelsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &MonitoredResourceDescriptor{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8370,39 +12420,42 @@ func (c *ProjectsMonitoredResourceDescriptorsGetCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Gets a single monitored resource descriptor. This method does not require a Workspace.", - // "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors/{monitoredResourceDescriptorsId}", - // "httpMethod": "GET", - // "id": "monitoring.projects.monitoredResourceDescriptors.get", + // "description": "Deletes a notification channel.Design your application to single-thread API calls that modify the state of notification channels in a single project. This includes calls to CreateNotificationChannel, DeleteNotificationChannel and UpdateNotificationChannel.", + // "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}", + // "httpMethod": "DELETE", + // "id": "monitoring.projects.notificationChannels.delete", // "parameterOrder": [ // "name" // ], // "parameters": { + // "force": { + // "description": "If true, the notification channel will be deleted regardless of its use in alert policies (the policies will be updated to remove the channel). If false, channels that are still referenced by an existing alerting policy will fail to be deleted in a delete operation.", + // "location": "query", + // "type": "boolean" + // }, // "name": { - // "description": "Required. The monitored resource descriptor to get. 
The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE]\nThe [RESOURCE_TYPE] is a predefined type, such as cloudsql_database.", + // "description": "Required. The channel for which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] ", // "location": "path", - // "pattern": "^projects/[^/]+/monitoredResourceDescriptors/.*$", + // "pattern": "^projects/[^/]+/notificationChannels/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v3/{+name}", // "response": { - // "$ref": "MonitoredResourceDescriptor" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read", - // "https://www.googleapis.com/auth/monitoring.write" + // "https://www.googleapis.com/auth/monitoring" // ] // } } -// method id "monitoring.projects.monitoredResourceDescriptors.list": +// method id "monitoring.projects.notificationChannels.get": -type ProjectsMonitoredResourceDescriptorsListCall struct { +type ProjectsNotificationChannelsGetCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -8411,46 +12464,25 @@ type ProjectsMonitoredResourceDescriptorsListCall struct { header_ http.Header } -// List: Lists monitored resource descriptors that match a filter. This -// method does not require a Workspace. -func (r *ProjectsMonitoredResourceDescriptorsService) List(name string) *ProjectsMonitoredResourceDescriptorsListCall { - c := &ProjectsMonitoredResourceDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Gets a single notification channel. The channel includes the +// relevant configuration details with which the channel was created. +// However, the response may truncate or omit passwords, API keys, or +// other private key matter and thus the response may not be 100% +// identical to the information that was supplied in the call to the +// create method. +// +// - name: The channel for which to execute the request. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]. +func (r *ProjectsNotificationChannelsService) Get(name string) *ProjectsNotificationChannelsGetCall { + c := &ProjectsNotificationChannelsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } -// Filter sets the optional parameter "filter": An optional filter -// (https://cloud.google.com/monitoring/api/v3/filters) describing the -// descriptors to be returned. The filter can reference the descriptor's -// type and labels. For example, the following filter returns only -// Google Compute Engine descriptors that have an id -// label: -// resource.type = starts_with("gce_") AND resource.label:id -func (c *ProjectsMonitoredResourceDescriptorsListCall) Filter(filter string) *ProjectsMonitoredResourceDescriptorsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// PageSize sets the optional parameter "pageSize": A positive number -// that is the maximum number of results to return. -func (c *ProjectsMonitoredResourceDescriptorsListCall) PageSize(pageSize int64) *ProjectsMonitoredResourceDescriptorsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If this field is -// not empty then it must contain the nextPageToken value returned by a -// previous call to this method. 
Using this field causes the method to -// return additional results from the previous method call. -func (c *ProjectsMonitoredResourceDescriptorsListCall) PageToken(pageToken string) *ProjectsMonitoredResourceDescriptorsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMonitoredResourceDescriptorsListCall) Fields(s ...googleapi.Field) *ProjectsMonitoredResourceDescriptorsListCall { +func (c *ProjectsNotificationChannelsGetCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8460,7 +12492,7 @@ func (c *ProjectsMonitoredResourceDescriptorsListCall) Fields(s ...googleapi.Fie // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsMonitoredResourceDescriptorsListCall) IfNoneMatch(entityTag string) *ProjectsMonitoredResourceDescriptorsListCall { +func (c *ProjectsNotificationChannelsGetCall) IfNoneMatch(entityTag string) *ProjectsNotificationChannelsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -8468,23 +12500,23 @@ func (c *ProjectsMonitoredResourceDescriptorsListCall) IfNoneMatch(entityTag str // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMonitoredResourceDescriptorsListCall) Context(ctx context.Context) *ProjectsMonitoredResourceDescriptorsListCall { +func (c *ProjectsNotificationChannelsGetCall) Context(ctx context.Context) *ProjectsNotificationChannelsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsMonitoredResourceDescriptorsListCall) Header() http.Header { +func (c *ProjectsNotificationChannelsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMonitoredResourceDescriptorsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsNotificationChannelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -8495,7 +12527,7 @@ func (c *ProjectsMonitoredResourceDescriptorsListCall) doRequest(alt string) (*h var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/monitoredResourceDescriptors") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -8508,35 +12540,33 @@ func (c *ProjectsMonitoredResourceDescriptorsListCall) doRequest(alt string) (*h return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.monitoredResourceDescriptors.list" call. -// Exactly one of *ListMonitoredResourceDescriptorsResponse or error -// will be non-nil. Any non-2xx status code is an error. 
Response -// headers are in either -// *ListMonitoredResourceDescriptorsResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *ProjectsMonitoredResourceDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) { +// Do executes the "monitoring.projects.notificationChannels.get" call. +// Exactly one of *NotificationChannel or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NotificationChannel.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsNotificationChannelsGetCall) Do(opts ...googleapi.CallOption) (*NotificationChannel, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ListMonitoredResourceDescriptorsResponse{ + ret := &NotificationChannel{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8548,144 +12578,122 @@ func (c *ProjectsMonitoredResourceDescriptorsListCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Lists monitored resource descriptors that match a filter. This method does not require a Workspace.", - // "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors", + // "description": "Gets a single notification channel. The channel includes the relevant configuration details with which the channel was created. However, the response may truncate or omit passwords, API keys, or other private key matter and thus the response may not be 100% identical to the information that was supplied in the call to the create method.", + // "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}", // "httpMethod": "GET", - // "id": "monitoring.projects.monitoredResourceDescriptors.list", + // "id": "monitoring.projects.notificationChannels.get", // "parameterOrder": [ // "name" // ], // "parameters": { - // "filter": { - // "description": "An optional filter (https://cloud.google.com/monitoring/api/v3/filters) describing the descriptors to be returned. The filter can reference the descriptor's type and labels. For example, the following filter returns only Google Compute Engine descriptors that have an id label:\nresource.type = starts_with(\"gce_\") AND resource.label:id\n", - // "location": "query", - // "type": "string" - // }, // "name": { - // "description": "Required. The project on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + // "description": "Required. The channel for which to execute the request. 
The format is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] ", // "location": "path", - // "pattern": "^projects/[^/]+$", + // "pattern": "^projects/[^/]+/notificationChannels/[^/]+$", // "required": true, // "type": "string" - // }, - // "pageSize": { - // "description": "A positive number that is the maximum number of results to return.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.", - // "location": "query", - // "type": "string" // } // }, - // "path": "v3/{+name}/monitoredResourceDescriptors", + // "path": "v3/{+name}", // "response": { - // "$ref": "ListMonitoredResourceDescriptorsResponse" + // "$ref": "NotificationChannel" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read", - // "https://www.googleapis.com/auth/monitoring.write" + // "https://www.googleapis.com/auth/monitoring.read" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsMonitoredResourceDescriptorsListCall) Pages(ctx context.Context, f func(*ListMonitoredResourceDescriptorsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "monitoring.projects.notificationChannelDescriptors.get": +// method id "monitoring.projects.notificationChannels.getVerificationCode": -type ProjectsNotificationChannelDescriptorsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsNotificationChannelsGetVerificationCodeCall struct { + s *Service + name string + getnotificationchannelverificationcoderequest *GetNotificationChannelVerificationCodeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets a single channel descriptor. The descriptor indicates which -// fields are expected / permitted for a notification channel of the -// given type. -func (r *ProjectsNotificationChannelDescriptorsService) Get(name string) *ProjectsNotificationChannelDescriptorsGetCall { - c := &ProjectsNotificationChannelDescriptorsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetVerificationCode: Requests a verification code for an already +// verified channel that can then be used in a call to +// VerifyNotificationChannel() on a different channel with an equivalent +// identity in the same or in a different project. This makes it +// possible to copy a channel between projects without requiring manual +// reverification of the channel. 
If the channel is not in the verified +// state, this method will fail (in other words, this may only be used +// if the SendNotificationChannelVerificationCode and +// VerifyNotificationChannel paths have already been used to put the +// given channel into the verified state).There is no guarantee that the +// verification codes returned by this method will be of a similar +// structure or form as the ones that are delivered to the channel via +// SendNotificationChannelVerificationCode; while +// VerifyNotificationChannel() will recognize both the codes delivered +// via SendNotificationChannelVerificationCode() and returned from +// GetNotificationChannelVerificationCode(), it is typically the case +// that the verification codes delivered via +// SendNotificationChannelVerificationCode() will be shorter and also +// have a shorter expiration (e.g. codes such as "G-123456") whereas +// GetVerificationCode() will typically return a much longer, websafe +// base 64 encoded string that has a longer expiration time. +// +// - name: The notification channel for which a verification code is to +// be generated and retrieved. This must name a channel that is +// already verified; if the specified channel is not verified, the +// request will fail. +func (r *ProjectsNotificationChannelsService) GetVerificationCode(name string, getnotificationchannelverificationcoderequest *GetNotificationChannelVerificationCodeRequest) *ProjectsNotificationChannelsGetVerificationCodeCall { + c := &ProjectsNotificationChannelsGetVerificationCodeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name + c.getnotificationchannelverificationcoderequest = getnotificationchannelverificationcoderequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsNotificationChannelDescriptorsGetCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelDescriptorsGetCall { +func (c *ProjectsNotificationChannelsGetVerificationCodeCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsGetVerificationCodeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsNotificationChannelDescriptorsGetCall) IfNoneMatch(entityTag string) *ProjectsNotificationChannelDescriptorsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsNotificationChannelDescriptorsGetCall) Context(ctx context.Context) *ProjectsNotificationChannelDescriptorsGetCall { +func (c *ProjectsNotificationChannelsGetVerificationCodeCall) Context(ctx context.Context) *ProjectsNotificationChannelsGetVerificationCodeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
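The doc comment above describes a two-step flow (SendNotificationChannelVerificationCode, then VerifyNotificationChannel) that must already have put the channel into the verified state before GetVerificationCode can succeed; a hedged sketch of retrieving the long-lived code, with the response's Code field assumed from the generated schema:

// svc and ctx as in the first sketch above. The named channel
// must already be verified or this call fails.
resp, err := svc.Projects.NotificationChannels.
	GetVerificationCode(
		"projects/my-project/notificationChannels/12345", // hypothetical, already verified
		&monitoring.GetNotificationChannelVerificationCodeRequest{},
	).
	Context(ctx).
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println("long-lived code:", resp.Code) // assumed field name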
-func (c *ProjectsNotificationChannelDescriptorsGetCall) Header() http.Header { +func (c *ProjectsNotificationChannelsGetVerificationCodeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsNotificationChannelDescriptorsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsNotificationChannelsGetVerificationCodeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.getnotificationchannelverificationcoderequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}:getVerificationCode") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -8696,33 +12704,35 @@ func (c *ProjectsNotificationChannelDescriptorsGetCall) doRequest(alt string) (* return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.notificationChannelDescriptors.get" call. -// Exactly one of *NotificationChannelDescriptor or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *NotificationChannelDescriptor.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsNotificationChannelDescriptorsGetCall) Do(opts ...googleapi.CallOption) (*NotificationChannelDescriptor, error) { +// Do executes the "monitoring.projects.notificationChannels.getVerificationCode" call. +// Exactly one of *GetNotificationChannelVerificationCodeResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GetNotificationChannelVerificationCodeResponse.ServerResponse.Header +// or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsNotificationChannelsGetVerificationCodeCall) Do(opts ...googleapi.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NotificationChannelDescriptor{ + ret := &GetNotificationChannelVerificationCodeResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8734,38 +12744,40 @@ func (c *ProjectsNotificationChannelDescriptorsGetCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Gets a single channel descriptor. The descriptor indicates which fields are expected / permitted for a notification channel of the given type.", - // "flatPath": "v3/projects/{projectsId}/notificationChannelDescriptors/{notificationChannelDescriptorsId}", - // "httpMethod": "GET", - // "id": "monitoring.projects.notificationChannelDescriptors.get", + // "description": "Requests a verification code for an already verified channel that can then be used in a call to VerifyNotificationChannel() on a different channel with an equivalent identity in the same or in a different project. This makes it possible to copy a channel between projects without requiring manual reverification of the channel. If the channel is not in the verified state, this method will fail (in other words, this may only be used if the SendNotificationChannelVerificationCode and VerifyNotificationChannel paths have already been used to put the given channel into the verified state).There is no guarantee that the verification codes returned by this method will be of a similar structure or form as the ones that are delivered to the channel via SendNotificationChannelVerificationCode; while VerifyNotificationChannel() will recognize both the codes delivered via SendNotificationChannelVerificationCode() and returned from GetNotificationChannelVerificationCode(), it is typically the case that the verification codes delivered via SendNotificationChannelVerificationCode() will be shorter and also have a shorter expiration (e.g. codes such as \"G-123456\") whereas GetVerificationCode() will typically return a much longer, websafe base 64 encoded string that has a longer expiration time.", + // "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}:getVerificationCode", + // "httpMethod": "POST", + // "id": "monitoring.projects.notificationChannels.getVerificationCode", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The channel type for which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE]\n", + // "description": "Required. The notification channel for which a verification code is to be generated and retrieved. 
This must name a channel that is already verified; if the specified channel is not verified, the request will fail.", // "location": "path", - // "pattern": "^projects/[^/]+/notificationChannelDescriptors/[^/]+$", + // "pattern": "^projects/[^/]+/notificationChannels/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v3/{+name}", + // "path": "v3/{+name}:getVerificationCode", + // "request": { + // "$ref": "GetNotificationChannelVerificationCodeRequest" + // }, // "response": { - // "$ref": "NotificationChannelDescriptor" + // "$ref": "GetNotificationChannelVerificationCodeResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read" + // "https://www.googleapis.com/auth/monitoring" // ] // } } -// method id "monitoring.projects.notificationChannelDescriptors.list": +// method id "monitoring.projects.notificationChannels.list": -type ProjectsNotificationChannelDescriptorsListCall struct { +type ProjectsNotificationChannelsListCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -8774,19 +12786,47 @@ type ProjectsNotificationChannelDescriptorsListCall struct { header_ http.Header } -// List: Lists the descriptors for supported channel types. The use of -// descriptors makes it possible for new channel types to be dynamically -// added. -func (r *ProjectsNotificationChannelDescriptorsService) List(name string) *ProjectsNotificationChannelDescriptorsListCall { - c := &ProjectsNotificationChannelDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Lists the notification channels that have been created for the +// project. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) on which +// to execute the request. The format is: +// projects/[PROJECT_ID_OR_NUMBER] This names the container in which +// to look for the notification channels; it does not name a specific +// channel. To query a specific channel by REST resource name, use the +// GetNotificationChannel operation. +func (r *ProjectsNotificationChannelsService) List(name string) *ProjectsNotificationChannelsListCall { + c := &ProjectsNotificationChannelsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } +// Filter sets the optional parameter "filter": If provided, this field +// specifies the criteria that must be met by notification channels to +// be included in the response.For more details, see sorting and +// filtering +// (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). +func (c *ProjectsNotificationChannelsListCall) Filter(filter string) *ProjectsNotificationChannelsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// OrderBy sets the optional parameter "orderBy": A comma-separated list +// of fields by which to sort the result. Supports the same set of +// fields as in filter. Entries can be prefixed with a minus sign to +// sort in descending rather than ascending order.For more details, see +// sorting and filtering +// (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). +func (c *ProjectsNotificationChannelsListCall) OrderBy(orderBy string) *ProjectsNotificationChannelsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + // PageSize sets the optional parameter "pageSize": The maximum number // of results to return in a single response. 
If not set to a positive // number, a reasonable value will be chosen by the service. -func (c *ProjectsNotificationChannelDescriptorsListCall) PageSize(pageSize int64) *ProjectsNotificationChannelDescriptorsListCall { +func (c *ProjectsNotificationChannelsListCall) PageSize(pageSize int64) *ProjectsNotificationChannelsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } @@ -8794,7 +12834,7 @@ func (c *ProjectsNotificationChannelDescriptorsListCall) PageSize(pageSize int64 // PageToken sets the optional parameter "pageToken": If non-empty, // page_token must contain a value returned as the next_page_token in a // previous response to request the next set of results. -func (c *ProjectsNotificationChannelDescriptorsListCall) PageToken(pageToken string) *ProjectsNotificationChannelDescriptorsListCall { +func (c *ProjectsNotificationChannelsListCall) PageToken(pageToken string) *ProjectsNotificationChannelsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -8802,7 +12842,7 @@ func (c *ProjectsNotificationChannelDescriptorsListCall) PageToken(pageToken str // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsNotificationChannelDescriptorsListCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelDescriptorsListCall { +func (c *ProjectsNotificationChannelsListCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8812,7 +12852,7 @@ func (c *ProjectsNotificationChannelDescriptorsListCall) Fields(s ...googleapi.F // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsNotificationChannelDescriptorsListCall) IfNoneMatch(entityTag string) *ProjectsNotificationChannelDescriptorsListCall { +func (c *ProjectsNotificationChannelsListCall) IfNoneMatch(entityTag string) *ProjectsNotificationChannelsListCall { c.ifNoneMatch_ = entityTag return c } @@ -8820,23 +12860,23 @@ func (c *ProjectsNotificationChannelDescriptorsListCall) IfNoneMatch(entityTag s // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsNotificationChannelDescriptorsListCall) Context(ctx context.Context) *ProjectsNotificationChannelDescriptorsListCall { +func (c *ProjectsNotificationChannelsListCall) Context(ctx context.Context) *ProjectsNotificationChannelsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
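A sketch combining the filter and orderBy parameters documented in this hunk with the Pages helper (filter expression hypothetical; syntax per cloud.google.com/monitoring/api/v3/sorting-and-filtering; svc and ctx as in the first sketch):

err := svc.Projects.NotificationChannels.
	List("projects/my-project").
	Filter(`type="email"`).   // hypothetical filter
	OrderBy("-display_name"). // minus prefix sorts descending, per the doc comment
	PageSize(50).
	Pages(ctx, func(page *monitoring.ListNotificationChannelsResponse) error {
		for _, ch := range page.NotificationChannels {
			fmt.Println(ch.Name, ch.DisplayName)
		}
		return nil
	})
if err != nil {
	log.Fatal(err)
}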
-func (c *ProjectsNotificationChannelDescriptorsListCall) Header() http.Header { +func (c *ProjectsNotificationChannelsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsNotificationChannelDescriptorsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsNotificationChannelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -8847,7 +12887,7 @@ func (c *ProjectsNotificationChannelDescriptorsListCall) doRequest(alt string) ( var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/notificationChannelDescriptors") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/notificationChannels") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -8860,35 +12900,33 @@ func (c *ProjectsNotificationChannelDescriptorsListCall) doRequest(alt string) ( return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.notificationChannelDescriptors.list" call. -// Exactly one of *ListNotificationChannelDescriptorsResponse or error -// will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *ListNotificationChannelDescriptorsResponse.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *ProjectsNotificationChannelDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListNotificationChannelDescriptorsResponse, error) { +// Do executes the "monitoring.projects.notificationChannels.list" call. +// Exactly one of *ListNotificationChannelsResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ListNotificationChannelsResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsNotificationChannelsListCall) Do(opts ...googleapi.CallOption) (*ListNotificationChannelsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ListNotificationChannelDescriptorsResponse{ + ret := &ListNotificationChannelsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8900,21 +12938,31 @@ func (c *ProjectsNotificationChannelDescriptorsListCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Lists the descriptors for supported channel types. 
The use of descriptors makes it possible for new channel types to be dynamically added.", - // "flatPath": "v3/projects/{projectsId}/notificationChannelDescriptors", + // "description": "Lists the notification channels that have been created for the project.", + // "flatPath": "v3/projects/{projectsId}/notificationChannels", // "httpMethod": "GET", - // "id": "monitoring.projects.notificationChannelDescriptors.list", + // "id": "monitoring.projects.notificationChannels.list", // "parameterOrder": [ // "name" // ], // "parameters": { + // "filter": { + // "description": "If provided, this field specifies the criteria that must be met by notification channels to be included in the response.For more details, see sorting and filtering (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).", + // "location": "query", + // "type": "string" + // }, // "name": { - // "description": "Required. The REST resource name of the parent from which to retrieve the notification channel descriptors. The expected syntax is:\nprojects/[PROJECT_ID_OR_NUMBER]\nNote that this names the parent container in which to look for the descriptors; to retrieve a single descriptor by name, use the GetNotificationChannelDescriptor operation, instead.", + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] This names the container in which to look for the notification channels; it does not name a specific channel. To query a specific channel by REST resource name, use the GetNotificationChannel operation.", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // }, + // "orderBy": { + // "description": "A comma-separated list of fields by which to sort the result. Supports the same set of fields as in filter. Entries can be prefixed with a minus sign to sort in descending rather than ascending order.For more details, see sorting and filtering (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).", + // "location": "query", + // "type": "string" + // }, // "pageSize": { // "description": "The maximum number of results to return in a single response. If not set to a positive number, a reasonable value will be chosen by the service.", // "format": "int32", @@ -8927,9 +12975,9 @@ func (c *ProjectsNotificationChannelDescriptorsListCall) Do(opts ...googleapi.Ca // "type": "string" // } // }, - // "path": "v3/{+name}/notificationChannelDescriptors", + // "path": "v3/{+name}/notificationChannels", // "response": { - // "$ref": "ListNotificationChannelDescriptorsResponse" + // "$ref": "ListNotificationChannelsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -8943,7 +12991,7 @@ func (c *ProjectsNotificationChannelDescriptorsListCall) Do(opts ...googleapi.Ca // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsNotificationChannelDescriptorsListCall) Pages(ctx context.Context, f func(*ListNotificationChannelDescriptorsResponse) error) error { +func (c *ProjectsNotificationChannelsListCall) Pages(ctx context.Context, f func(*ListNotificationChannelsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -8961,9 +13009,9 @@ func (c *ProjectsNotificationChannelDescriptorsListCall) Pages(ctx context.Conte } } -// method id "monitoring.projects.notificationChannels.create": +// method id "monitoring.projects.notificationChannels.patch": -type ProjectsNotificationChannelsCreateCall struct { +type ProjectsNotificationChannelsPatchCall struct { s *Service name string notificationchannel *NotificationChannel @@ -8972,20 +13020,34 @@ type ProjectsNotificationChannelsCreateCall struct { header_ http.Header } -// Create: Creates a new notification channel, representing a single -// notification endpoint such as an email address, SMS number, or -// PagerDuty service. -func (r *ProjectsNotificationChannelsService) Create(name string, notificationchannel *NotificationChannel) *ProjectsNotificationChannelsCreateCall { - c := &ProjectsNotificationChannelsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a notification channel. Fields not specified in the +// field mask remain unchanged.Design your application to single-thread +// API calls that modify the state of notification channels in a single +// project. This includes calls to CreateNotificationChannel, +// DeleteNotificationChannel and UpdateNotificationChannel. +// +// - name: The full REST resource name for this channel. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] +// The [CHANNEL_ID] is automatically assigned by the server on +// creation. +func (r *ProjectsNotificationChannelsService) Patch(name string, notificationchannel *NotificationChannel) *ProjectsNotificationChannelsPatchCall { + c := &ProjectsNotificationChannelsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name c.notificationchannel = notificationchannel return c } +// UpdateMask sets the optional parameter "updateMask": The fields to +// update. +func (c *ProjectsNotificationChannelsPatchCall) UpdateMask(updateMask string) *ProjectsNotificationChannelsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsNotificationChannelsCreateCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsCreateCall { +func (c *ProjectsNotificationChannelsPatchCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8993,23 +13055,23 @@ func (c *ProjectsNotificationChannelsCreateCall) Fields(s ...googleapi.Field) *P // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsNotificationChannelsCreateCall) Context(ctx context.Context) *ProjectsNotificationChannelsCreateCall { +func (c *ProjectsNotificationChannelsPatchCall) Context(ctx context.Context) *ProjectsNotificationChannelsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
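A hedged sketch of the Patch call introduced here, driving UpdateMask so that only the named field changes; the DisplayName field and the field-mask spelling are assumptions from the upstream schema, not shown in this hunk (same assumed imports as the earlier sketch):

	// renameChannel updates only the display name of an existing channel.
	func renameChannel(ctx context.Context, svc *monitoring.Service) error {
		name := "projects/my-project/notificationChannels/12345" // hypothetical CHANNEL_ID
		_, err := svc.Projects.NotificationChannels.
			Patch(name, &monitoring.NotificationChannel{DisplayName: "on-call email"}). // field assumed
			UpdateMask("displayName"). // fields outside the mask remain unchanged
			Context(ctx).
			Do()
		return err
	}

Per the new doc comment above, calls that mutate channel state within one project (create, delete, update) should be single-threaded by the application.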
-func (c *ProjectsNotificationChannelsCreateCall) Header() http.Header { +func (c *ProjectsNotificationChannelsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsNotificationChannelsCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsNotificationChannelsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -9022,9 +13084,9 @@ func (c *ProjectsNotificationChannelsCreateCall) doRequest(alt string) (*http.Re reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/notificationChannels") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -9035,31 +13097,31 @@ func (c *ProjectsNotificationChannelsCreateCall) doRequest(alt string) (*http.Re return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.notificationChannels.create" call. +// Do executes the "monitoring.projects.notificationChannels.patch" call. // Exactly one of *NotificationChannel or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *NotificationChannel.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ProjectsNotificationChannelsCreateCall) Do(opts ...googleapi.CallOption) (*NotificationChannel, error) { +func (c *ProjectsNotificationChannelsPatchCall) Do(opts ...googleapi.CallOption) (*NotificationChannel, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NotificationChannel{ ServerResponse: googleapi.ServerResponse{ @@ -9073,23 +13135,29 @@ func (c *ProjectsNotificationChannelsCreateCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Creates a new notification channel, representing a single notification endpoint such as an email address, SMS number, or PagerDuty service.", - // "flatPath": "v3/projects/{projectsId}/notificationChannels", - // "httpMethod": "POST", - // "id": "monitoring.projects.notificationChannels.create", + // "description": "Updates a notification channel. Fields not specified in the field mask remain unchanged.Design your application to single-thread API calls that modify the state of notification channels in a single project. 
This includes calls to CreateNotificationChannel, DeleteNotificationChannel and UpdateNotificationChannel.", + // "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}", + // "httpMethod": "PATCH", + // "id": "monitoring.projects.notificationChannels.patch", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The project on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\nThis names the container into which the channel will be written, this does not name the newly created channel. The resulting channel's name will have a normalized version of this field as a prefix, but will add /notificationChannels/[CHANNEL_ID] to identify the channel.", + // "description": "The full REST resource name for this channel. The format is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] The [CHANNEL_ID] is automatically assigned by the server on creation.", // "location": "path", - // "pattern": "^projects/[^/]+$", + // "pattern": "^projects/[^/]+/notificationChannels/[^/]+$", // "required": true, // "type": "string" + // }, + // "updateMask": { + // "description": "The fields to update.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, - // "path": "v3/{+name}/notificationChannels", + // "path": "v3/{+name}", // "request": { // "$ref": "NotificationChannel" // }, @@ -9104,37 +13172,34 @@ func (c *ProjectsNotificationChannelsCreateCall) Do(opts ...googleapi.CallOption } -// method id "monitoring.projects.notificationChannels.delete": +// method id "monitoring.projects.notificationChannels.sendVerificationCode": -type ProjectsNotificationChannelsDeleteCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsNotificationChannelsSendVerificationCodeCall struct { + s *Service + name string + sendnotificationchannelverificationcoderequest *SendNotificationChannelVerificationCodeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes a notification channel. -func (r *ProjectsNotificationChannelsService) Delete(name string) *ProjectsNotificationChannelsDeleteCall { - c := &ProjectsNotificationChannelsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SendVerificationCode: Causes a verification code to be delivered to +// the channel. The code can then be supplied in +// VerifyNotificationChannel to verify the channel. +// +// - name: The notification channel to which to send a verification +// code. +func (r *ProjectsNotificationChannelsService) SendVerificationCode(name string, sendnotificationchannelverificationcoderequest *SendNotificationChannelVerificationCodeRequest) *ProjectsNotificationChannelsSendVerificationCodeCall { + c := &ProjectsNotificationChannelsSendVerificationCodeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - return c -} - -// Force sets the optional parameter "force": If true, the notification -// channel will be deleted regardless of its use in alert policies (the -// policies will be updated to remove the channel). If false, channels -// that are still referenced by an existing alerting policy will fail to -// be deleted in a delete operation. 
-func (c *ProjectsNotificationChannelsDeleteCall) Force(force bool) *ProjectsNotificationChannelsDeleteCall { - c.urlParams_.Set("force", fmt.Sprint(force)) + c.sendnotificationchannelverificationcoderequest = sendnotificationchannelverificationcoderequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsNotificationChannelsDeleteCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsDeleteCall { +func (c *ProjectsNotificationChannelsSendVerificationCodeCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsSendVerificationCodeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9142,33 +13207,38 @@ func (c *ProjectsNotificationChannelsDeleteCall) Fields(s ...googleapi.Field) *P // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsNotificationChannelsDeleteCall) Context(ctx context.Context) *ProjectsNotificationChannelsDeleteCall { +func (c *ProjectsNotificationChannelsSendVerificationCodeCall) Context(ctx context.Context) *ProjectsNotificationChannelsSendVerificationCodeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsNotificationChannelsDeleteCall) Header() http.Header { +func (c *ProjectsNotificationChannelsSendVerificationCodeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsNotificationChannelsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsNotificationChannelsSendVerificationCodeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sendnotificationchannelverificationcoderequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}:sendVerificationCode") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -9179,31 +13249,31 @@ func (c *ProjectsNotificationChannelsDeleteCall) doRequest(alt string) (*http.Re return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.notificationChannels.delete" call. +// Do executes the "monitoring.projects.notificationChannels.sendVerificationCode" call. // Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. 
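For reference, a minimal sketch of the sendVerificationCode call this hunk introduces; the request message carries no fields in this hunk, so an empty struct is sent (same assumed imports as above):

	// sendCode asks the service to deliver a verification code to a channel.
	func sendCode(ctx context.Context, svc *monitoring.Service) error {
		name := "projects/my-project/notificationChannels/12345" // hypothetical channel
		_, err := svc.Projects.NotificationChannels.
			SendVerificationCode(name, &monitoring.SendNotificationChannelVerificationCodeRequest{}).
			Context(ctx).
			Do() // returns *Empty on success
		return err
	}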
-func (c *ProjectsNotificationChannelsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +func (c *ProjectsNotificationChannelsSendVerificationCodeCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -9217,28 +13287,26 @@ func (c *ProjectsNotificationChannelsDeleteCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Deletes a notification channel.", - // "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}", - // "httpMethod": "DELETE", - // "id": "monitoring.projects.notificationChannels.delete", + // "description": "Causes a verification code to be delivered to the channel. The code can then be supplied in VerifyNotificationChannel to verify the channel.", + // "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}:sendVerificationCode", + // "httpMethod": "POST", + // "id": "monitoring.projects.notificationChannels.sendVerificationCode", // "parameterOrder": [ // "name" // ], // "parameters": { - // "force": { - // "description": "If true, the notification channel will be deleted regardless of its use in alert policies (the policies will be updated to remove the channel). If false, channels that are still referenced by an existing alerting policy will fail to be deleted in a delete operation.", - // "location": "query", - // "type": "boolean" - // }, // "name": { - // "description": "Required. The channel for which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]\n", + // "description": "Required. The notification channel to which to send a verification code.", // "location": "path", // "pattern": "^projects/[^/]+/notificationChannels/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v3/{+name}", + // "path": "v3/{+name}:sendVerificationCode", + // "request": { + // "$ref": "SendNotificationChannelVerificationCodeRequest" + // }, // "response": { // "$ref": "Empty" // }, @@ -9250,80 +13318,72 @@ func (c *ProjectsNotificationChannelsDeleteCall) Do(opts ...googleapi.CallOption } -// method id "monitoring.projects.notificationChannels.get": +// method id "monitoring.projects.notificationChannels.verify": -type ProjectsNotificationChannelsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsNotificationChannelsVerifyCall struct { + s *Service + name string + verifynotificationchannelrequest *VerifyNotificationChannelRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Gets a single notification channel. The channel includes the -// relevant configuration details with which the channel was created. 
-// However, the response may truncate or omit passwords, API keys, or -// other private key matter and thus the response may not be 100% -// identical to the information that was supplied in the call to the -// create method. -func (r *ProjectsNotificationChannelsService) Get(name string) *ProjectsNotificationChannelsGetCall { - c := &ProjectsNotificationChannelsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Verify: Verifies a NotificationChannel by proving receipt of the code +// delivered to the channel as a result of calling +// SendNotificationChannelVerificationCode. +// +// - name: The notification channel to verify. +func (r *ProjectsNotificationChannelsService) Verify(name string, verifynotificationchannelrequest *VerifyNotificationChannelRequest) *ProjectsNotificationChannelsVerifyCall { + c := &ProjectsNotificationChannelsVerifyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name + c.verifynotificationchannelrequest = verifynotificationchannelrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsNotificationChannelsGetCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsGetCall { +func (c *ProjectsNotificationChannelsVerifyCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsVerifyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsNotificationChannelsGetCall) IfNoneMatch(entityTag string) *ProjectsNotificationChannelsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsNotificationChannelsGetCall) Context(ctx context.Context) *ProjectsNotificationChannelsGetCall { +func (c *ProjectsNotificationChannelsVerifyCall) Context(ctx context.Context) *ProjectsNotificationChannelsVerifyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
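Completing that flow, a sketch of the verify call: the code delivered by sendVerificationCode is echoed back to mark the channel verified. The Code request field is an assumption from the upstream schema, not shown in this hunk:

	// verifyChannel proves receipt of the delivered verification code.
	func verifyChannel(ctx context.Context, svc *monitoring.Service, code string) (*monitoring.NotificationChannel, error) {
		name := "projects/my-project/notificationChannels/12345" // hypothetical channel
		return svc.Projects.NotificationChannels.
			Verify(name, &monitoring.VerifyNotificationChannelRequest{Code: code}). // field assumed
			Context(ctx).
			Do()
	}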
-func (c *ProjectsNotificationChannelsGetCall) Header() http.Header { +func (c *ProjectsNotificationChannelsVerifyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsNotificationChannelsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsNotificationChannelsVerifyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.verifynotificationchannelrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}:verify") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -9334,31 +13394,31 @@ func (c *ProjectsNotificationChannelsGetCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.notificationChannels.get" call. +// Do executes the "monitoring.projects.notificationChannels.verify" call. // Exactly one of *NotificationChannel or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *NotificationChannel.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ProjectsNotificationChannelsGetCall) Do(opts ...googleapi.CallOption) (*NotificationChannel, error) { +func (c *ProjectsNotificationChannelsVerifyCall) Do(opts ...googleapi.CallOption) (*NotificationChannel, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &NotificationChannel{ ServerResponse: googleapi.ServerResponse{ @@ -9372,78 +13432,67 @@ func (c *ProjectsNotificationChannelsGetCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Gets a single notification channel. The channel includes the relevant configuration details with which the channel was created. 
However, the response may truncate or omit passwords, API keys, or other private key matter and thus the response may not be 100% identical to the information that was supplied in the call to the create method.", - // "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}", - // "httpMethod": "GET", - // "id": "monitoring.projects.notificationChannels.get", + // "description": "Verifies a NotificationChannel by proving receipt of the code delivered to the channel as a result of calling SendNotificationChannelVerificationCode.", + // "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}:verify", + // "httpMethod": "POST", + // "id": "monitoring.projects.notificationChannels.verify", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The channel for which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]\n", + // "description": "Required. The notification channel to verify.", // "location": "path", // "pattern": "^projects/[^/]+/notificationChannels/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v3/{+name}", + // "path": "v3/{+name}:verify", + // "request": { + // "$ref": "VerifyNotificationChannelRequest" + // }, // "response": { // "$ref": "NotificationChannel" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring", - // "https://www.googleapis.com/auth/monitoring.read" + // "https://www.googleapis.com/auth/monitoring" // ] // } } -// method id "monitoring.projects.notificationChannels.getVerificationCode": +// method id "monitoring.projects.snoozes.create": -type ProjectsNotificationChannelsGetVerificationCodeCall struct { - s *Service - name string - getnotificationchannelverificationcoderequest *GetNotificationChannelVerificationCodeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsSnoozesCreateCall struct { + s *Service + parent string + snooze *Snooze + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetVerificationCode: Requests a verification code for an already -// verified channel that can then be used in a call to -// VerifyNotificationChannel() on a different channel with an equivalent -// identity in the same or in a different project. This makes it -// possible to copy a channel between projects without requiring manual -// reverification of the channel. If the channel is not in the verified -// state, this method will fail (in other words, this may only be used -// if the SendNotificationChannelVerificationCode and -// VerifyNotificationChannel paths have already been used to put the -// given channel into the verified state).There is no guarantee that the -// verification codes returned by this method will be of a similar -// structure or form as the ones that are delivered to the channel via -// SendNotificationChannelVerificationCode; while -// VerifyNotificationChannel() will recognize both the codes delivered -// via SendNotificationChannelVerificationCode() and returned from -// GetNotificationChannelVerificationCode(), it is typically the case -// that the verification codes delivered via -// SendNotificationChannelVerificationCode() will be shorter and also -// have a shorter expiration (e.g. 
codes such as "G-123456") whereas -// GetVerificationCode() will typically return a much longer, websafe -// base 64 encoded string that has a longer expiration time. -func (r *ProjectsNotificationChannelsService) GetVerificationCode(name string, getnotificationchannelverificationcoderequest *GetNotificationChannelVerificationCodeRequest) *ProjectsNotificationChannelsGetVerificationCodeCall { - c := &ProjectsNotificationChannelsGetVerificationCodeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.getnotificationchannelverificationcoderequest = getnotificationchannelverificationcoderequest +// Create: Creates a Snooze that will prevent alerts, which match the +// provided criteria, from being opened. The Snooze applies for a +// specific time interval. +// +// - parent: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) in which +// a Snooze should be created. The format is: +// projects/[PROJECT_ID_OR_NUMBER]. +func (r *ProjectsSnoozesService) Create(parent string, snooze *Snooze) *ProjectsSnoozesCreateCall { + c := &ProjectsSnoozesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.snooze = snooze return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsNotificationChannelsGetVerificationCodeCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsGetVerificationCodeCall { +func (c *ProjectsSnoozesCreateCall) Fields(s ...googleapi.Field) *ProjectsSnoozesCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9451,36 +13500,36 @@ func (c *ProjectsNotificationChannelsGetVerificationCodeCall) Fields(s ...google // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsNotificationChannelsGetVerificationCodeCall) Context(ctx context.Context) *ProjectsNotificationChannelsGetVerificationCodeCall { +func (c *ProjectsSnoozesCreateCall) Context(ctx context.Context) *ProjectsSnoozesCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsNotificationChannelsGetVerificationCodeCall) Header() http.Header { +func (c *ProjectsSnoozesCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsNotificationChannelsGetVerificationCodeCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSnoozesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.getnotificationchannelverificationcoderequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snooze) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}:getVerificationCode") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+parent}/snoozes") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -9488,40 +13537,38 @@ func (c *ProjectsNotificationChannelsGetVerificationCodeCall) doRequest(alt stri } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.notificationChannels.getVerificationCode" call. -// Exactly one of *GetNotificationChannelVerificationCodeResponse or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *GetNotificationChannelVerificationCodeResponse.ServerResponse.Header -// or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *ProjectsNotificationChannelsGetVerificationCodeCall) Do(opts ...googleapi.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) { +// Do executes the "monitoring.projects.snoozes.create" call. +// Exactly one of *Snooze or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Snooze.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsSnoozesCreateCall) Do(opts ...googleapi.CallOption) (*Snooze, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &GetNotificationChannelVerificationCodeResponse{ + ret := &Snooze{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -9533,28 +13580,28 @@ func (c *ProjectsNotificationChannelsGetVerificationCodeCall) Do(opts ...googlea } return ret, nil // { - // "description": "Requests a verification code for an already verified channel that can then be used in a call to VerifyNotificationChannel() on a different channel with an equivalent identity in the same or in a different project. This makes it possible to copy a channel between projects without requiring manual reverification of the channel. If the channel is not in the verified state, this method will fail (in other words, this may only be used if the SendNotificationChannelVerificationCode and VerifyNotificationChannel paths have already been used to put the given channel into the verified state).There is no guarantee that the verification codes returned by this method will be of a similar structure or form as the ones that are delivered to the channel via SendNotificationChannelVerificationCode; while VerifyNotificationChannel() will recognize both the codes delivered via SendNotificationChannelVerificationCode() and returned from GetNotificationChannelVerificationCode(), it is typically the case that the verification codes delivered via SendNotificationChannelVerificationCode() will be shorter and also have a shorter expiration (e.g. 
codes such as \"G-123456\") whereas GetVerificationCode() will typically return a much longer, websafe base 64 encoded string that has a longer expiration time.", - // "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}:getVerificationCode", + // "description": "Creates a Snooze that will prevent alerts, which match the provided criteria, from being opened. The Snooze applies for a specific time interval.", + // "flatPath": "v3/projects/{projectsId}/snoozes", // "httpMethod": "POST", - // "id": "monitoring.projects.notificationChannels.getVerificationCode", + // "id": "monitoring.projects.snoozes.create", // "parameterOrder": [ - // "name" + // "parent" // ], // "parameters": { - // "name": { - // "description": "Required. The notification channel for which a verification code is to be generated and retrieved. This must name a channel that is already verified; if the specified channel is not verified, the request will fail.", + // "parent": { + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) in which a Snooze should be created. The format is: projects/[PROJECT_ID_OR_NUMBER] ", // "location": "path", - // "pattern": "^projects/[^/]+/notificationChannels/[^/]+$", + // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v3/{+name}:getVerificationCode", + // "path": "v3/{+parent}/snoozes", // "request": { - // "$ref": "GetNotificationChannelVerificationCodeRequest" + // "$ref": "Snooze" // }, // "response": { - // "$ref": "GetNotificationChannelVerificationCodeResponse" + // "$ref": "Snooze" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -9564,9 +13611,9 @@ func (c *ProjectsNotificationChannelsGetVerificationCodeCall) Do(opts ...googlea } -// method id "monitoring.projects.notificationChannels.list": +// method id "monitoring.projects.snoozes.get": -type ProjectsNotificationChannelsListCall struct { +type ProjectsSnoozesGetCall struct { s *Service name string urlParams_ gensupport.URLParams @@ -9575,55 +13622,20 @@ type ProjectsNotificationChannelsListCall struct { header_ http.Header } -// List: Lists the notification channels that have been created for the -// project. -func (r *ProjectsNotificationChannelsService) List(name string) *ProjectsNotificationChannelsListCall { - c := &ProjectsNotificationChannelsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Retrieves a Snooze by name. +// +// - name: The ID of the Snooze to retrieve. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID]. +func (r *ProjectsSnoozesService) Get(name string) *ProjectsSnoozesGetCall { + c := &ProjectsSnoozesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } -// Filter sets the optional parameter "filter": If provided, this field -// specifies the criteria that must be met by notification channels to -// be included in the response.For more details, see sorting and -// filtering -// (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). -func (c *ProjectsNotificationChannelsListCall) Filter(filter string) *ProjectsNotificationChannelsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// OrderBy sets the optional parameter "orderBy": A comma-separated list -// of fields by which to sort the result. Supports the same set of -// fields as in filter. 
Entries can be prefixed with a minus sign to -// sort in descending rather than ascending order.For more details, see -// sorting and filtering -// (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering). -func (c *ProjectsNotificationChannelsListCall) OrderBy(orderBy string) *ProjectsNotificationChannelsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageSize sets the optional parameter "pageSize": The maximum number -// of results to return in a single response. If not set to a positive -// number, a reasonable value will be chosen by the service. -func (c *ProjectsNotificationChannelsListCall) PageSize(pageSize int64) *ProjectsNotificationChannelsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": If non-empty, -// page_token must contain a value returned as the next_page_token in a -// previous response to request the next set of results. -func (c *ProjectsNotificationChannelsListCall) PageToken(pageToken string) *ProjectsNotificationChannelsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsNotificationChannelsListCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsListCall { +func (c *ProjectsSnoozesGetCall) Fields(s ...googleapi.Field) *ProjectsSnoozesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9633,7 +13645,7 @@ func (c *ProjectsNotificationChannelsListCall) Fields(s ...googleapi.Field) *Pro // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsNotificationChannelsListCall) IfNoneMatch(entityTag string) *ProjectsNotificationChannelsListCall { +func (c *ProjectsSnoozesGetCall) IfNoneMatch(entityTag string) *ProjectsSnoozesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -9641,23 +13653,23 @@ func (c *ProjectsNotificationChannelsListCall) IfNoneMatch(entityTag string) *Pr // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsNotificationChannelsListCall) Context(ctx context.Context) *ProjectsNotificationChannelsListCall { +func (c *ProjectsSnoozesGetCall) Context(ctx context.Context) *ProjectsSnoozesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
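A hedged sketch of the new Snooze create/get pair added in this hunk. The Snooze payload fields (DisplayName, Criteria, Interval) are assumptions from the upstream schema, and a real request must satisfy server-side validation beyond what is shown here (same assumed imports as above):

	// createAndFetchSnooze creates a snooze, then reads it back by its
	// server-assigned resource name.
	func createAndFetchSnooze(ctx context.Context, svc *monitoring.Service, policy string) (*monitoring.Snooze, error) {
		created, err := svc.Projects.Snoozes.
			Create("projects/my-project", &monitoring.Snooze{ // parent is hypothetical
				DisplayName: "maintenance window", // fields below assumed from upstream schema
				Criteria:    &monitoring.Criteria{Policies: []string{policy}},
				Interval: &monitoring.TimeInterval{
					StartTime: "2022-03-11T00:00:00-08:00",
					EndTime:   "2022-03-12T00:00:00-08:00",
				},
			}).
			Context(ctx).
			Do()
		if err != nil {
			return nil, err
		}
		return svc.Projects.Snoozes.Get(created.Name).Context(ctx).Do()
	}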
-func (c *ProjectsNotificationChannelsListCall) Header() http.Header { +func (c *ProjectsSnoozesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsNotificationChannelsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSnoozesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -9668,7 +13680,7 @@ func (c *ProjectsNotificationChannelsListCall) doRequest(alt string) (*http.Resp var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/notificationChannels") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -9681,33 +13693,33 @@ func (c *ProjectsNotificationChannelsListCall) doRequest(alt string) (*http.Resp return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.notificationChannels.list" call. -// Exactly one of *ListNotificationChannelsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ListNotificationChannelsResponse.ServerResponse.Header or (if -// a response was returned at all) in error.(*googleapi.Error).Header. -// Use googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsNotificationChannelsListCall) Do(opts ...googleapi.CallOption) (*ListNotificationChannelsResponse, error) { +// Do executes the "monitoring.projects.snoozes.get" call. +// Exactly one of *Snooze or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Snooze.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsSnoozesGetCall) Do(opts ...googleapi.CallOption) (*Snooze, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &ListNotificationChannelsResponse{ + ret := &Snooze{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -9719,46 +13731,25 @@ func (c *ProjectsNotificationChannelsListCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists the notification channels that have been created for the project.", - // "flatPath": "v3/projects/{projectsId}/notificationChannels", + // "description": "Retrieves a Snooze by name.", + // "flatPath": "v3/projects/{projectsId}/snoozes/{snoozesId}", // "httpMethod": "GET", - // "id": "monitoring.projects.notificationChannels.list", + // "id": "monitoring.projects.snoozes.get", // "parameterOrder": [ // "name" // ], // "parameters": { - // "filter": { - // "description": "If provided, this field specifies the criteria that must be met by notification channels to be included in the response.For more details, see sorting and filtering (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).", - // "location": "query", - // "type": "string" - // }, // "name": { - // "description": "Required. The project on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\nThis names the container in which to look for the notification channels; it does not name a specific channel. To query a specific channel by REST resource name, use the GetNotificationChannel operation.", + // "description": "Required. The ID of the Snooze to retrieve. The format is: projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] ", // "location": "path", - // "pattern": "^projects/[^/]+$", + // "pattern": "^projects/[^/]+/snoozes/[^/]+$", // "required": true, // "type": "string" - // }, - // "orderBy": { - // "description": "A comma-separated list of fields by which to sort the result. Supports the same set of fields as in filter. Entries can be prefixed with a minus sign to sort in descending rather than ascending order.For more details, see sorting and filtering (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).", - // "location": "query", - // "type": "string" - // }, - // "pageSize": { - // "description": "The maximum number of results to return in a single response. If not set to a positive number, a reasonable value will be chosen by the service.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "If non-empty, page_token must contain a value returned as the next_page_token in a previous response to request the next set of results.", - // "location": "query", - // "type": "string" // } // }, - // "path": "v3/{+name}/notificationChannels", + // "path": "v3/{+name}", // "response": { - // "$ref": "ListNotificationChannelsResponse" + // "$ref": "Snooze" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -9769,134 +13760,147 @@ func (c *ProjectsNotificationChannelsListCall) Do(opts ...googleapi.CallOption) } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. 
-// The provided context supersedes any context provided to the Context method. -func (c *ProjectsNotificationChannelsListCall) Pages(ctx context.Context, f func(*ListNotificationChannelsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } +// method id "monitoring.projects.snoozes.list": + +type ProjectsSnoozesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// method id "monitoring.projects.notificationChannels.patch": +// List: Lists the Snoozes associated with a project. Can optionally +// pass in filter, which specifies predicates to match Snoozes. +// +// - parent: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) whose +// Snoozes should be listed. The format is: +// projects/[PROJECT_ID_OR_NUMBER]. +func (r *ProjectsSnoozesService) List(parent string) *ProjectsSnoozesListCall { + c := &ProjectsSnoozesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} -type ProjectsNotificationChannelsPatchCall struct { - s *Service - name string - notificationchannel *NotificationChannel - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Filter sets the optional parameter "filter": Optional filter to +// restrict results to the given criteria. The following fields are +// supported. interval.start_time interval.end_timeFor example: ``` +// interval.start_time > "2022-03-11T00:00:00-08:00" AND +// interval.end_time < "2022-03-12T00:00:00-08:00" ``` +func (c *ProjectsSnoozesListCall) Filter(filter string) *ProjectsSnoozesListCall { + c.urlParams_.Set("filter", filter) + return c } -// Patch: Updates a notification channel. Fields not specified in the -// field mask remain unchanged. -func (r *ProjectsNotificationChannelsService) Patch(name string, notificationchannel *NotificationChannel) *ProjectsNotificationChannelsPatchCall { - c := &ProjectsNotificationChannelsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.notificationchannel = notificationchannel +// PageSize sets the optional parameter "pageSize": The maximum number +// of results to return for a single query. The server may further +// constrain the maximum number of results returned in a single page. +// The value should be in the range 1, 1000. If the value given is +// outside this range, the server will decide the number of results to +// be returned. +func (c *ProjectsSnoozesListCall) PageSize(pageSize int64) *ProjectsSnoozesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } -// UpdateMask sets the optional parameter "updateMask": The fields to -// update. -func (c *ProjectsNotificationChannelsPatchCall) UpdateMask(updateMask string) *ProjectsNotificationChannelsPatchCall { - c.urlParams_.Set("updateMask", updateMask) +// PageToken sets the optional parameter "pageToken": The +// next_page_token from a previous call to ListSnoozesRequest to get the +// next page of results. +func (c *ProjectsSnoozesListCall) PageToken(pageToken string) *ProjectsSnoozesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsNotificationChannelsPatchCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsPatchCall { +func (c *ProjectsSnoozesListCall) Fields(s ...googleapi.Field) *ProjectsSnoozesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsSnoozesListCall) IfNoneMatch(entityTag string) *ProjectsSnoozesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsNotificationChannelsPatchCall) Context(ctx context.Context) *ProjectsNotificationChannelsPatchCall { +func (c *ProjectsSnoozesListCall) Context(ctx context.Context) *ProjectsSnoozesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsNotificationChannelsPatchCall) Header() http.Header { +func (c *ProjectsSnoozesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsNotificationChannelsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSnoozesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.notificationchannel) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+parent}/snoozes") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "name": c.name, + "parent": c.parent, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.notificationChannels.patch" call. -// Exactly one of *NotificationChannel or error will be non-nil. Any +// Do executes the "monitoring.projects.snoozes.list" call. +// Exactly one of *ListSnoozesResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *NotificationChannel.ServerResponse.Header or (if a response was +// *ListSnoozesResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
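Finally, a sketch of snoozes.list combining the filter grammar quoted in the method description with the Pages helper; the Snoozes response field is assumed from the upstream schema:

	// listSnoozesInWindow pages through snoozes whose interval falls inside a
	// time window, using the documented interval.start_time/interval.end_time filter.
	func listSnoozesInWindow(ctx context.Context, svc *monitoring.Service) error {
		return svc.Projects.Snoozes.
			List("projects/my-project"). // hypothetical project
			Filter(`interval.start_time > "2022-03-11T00:00:00-08:00" AND interval.end_time < "2022-03-12T00:00:00-08:00"`).
			PageSize(100).
			Pages(ctx, func(page *monitoring.ListSnoozesResponse) error {
				for _, s := range page.Snoozes { // field assumed
					fmt.Println(s.Name)
				}
				return nil
			})
	}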
-func (c *ProjectsNotificationChannelsPatchCall) Do(opts ...googleapi.CallOption) (*NotificationChannel, error) { +func (c *ProjectsSnoozesListCall) Do(opts ...googleapi.CallOption) (*ListSnoozesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NotificationChannel{ + ret := &ListSnoozesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -9908,68 +13912,118 @@ func (c *ProjectsNotificationChannelsPatchCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a notification channel. Fields not specified in the field mask remain unchanged.", - // "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}", - // "httpMethod": "PATCH", - // "id": "monitoring.projects.notificationChannels.patch", + // "description": "Lists the Snoozes associated with a project. Can optionally pass in filter, which specifies predicates to match Snoozes.", + // "flatPath": "v3/projects/{projectsId}/snoozes", + // "httpMethod": "GET", + // "id": "monitoring.projects.snoozes.list", // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The full REST resource name for this channel. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]\nThe [CHANNEL_ID] is automatically assigned by the server on creation.", - // "location": "path", - // "pattern": "^projects/[^/]+/notificationChannels/[^/]+$", - // "required": true, + // "parent" + // ], + // "parameters": { + // "filter": { + // "description": "Optional. Optional filter to restrict results to the given criteria. The following fields are supported. interval.start_time interval.end_timeFor example: ``` interval.start_time \u003e \"2022-03-11T00:00:00-08:00\" AND interval.end_time \u003c \"2022-03-12T00:00:00-08:00\" ``` ", + // "location": "query", // "type": "string" // }, - // "updateMask": { - // "description": "The fields to update.", - // "format": "google-fieldmask", + // "pageSize": { + // "description": "Optional. The maximum number of results to return for a single query. The server may further constrain the maximum number of results returned in a single page. The value should be in the range 1, 1000. If the value given is outside this range, the server will decide the number of results to be returned.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. The next_page_token from a previous call to ListSnoozesRequest to get the next page of results.", // "location": "query", // "type": "string" + // }, + // "parent": { + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) whose Snoozes should be listed. 
The format is: projects/[PROJECT_ID_OR_NUMBER] ", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" // } // }, - // "path": "v3/{+name}", - // "request": { - // "$ref": "NotificationChannel" - // }, + // "path": "v3/{+parent}/snoozes", // "response": { - // "$ref": "NotificationChannel" + // "$ref": "ListSnoozesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" + // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.read" // ] // } } -// method id "monitoring.projects.notificationChannels.sendVerificationCode": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsSnoozesListCall) Pages(ctx context.Context, f func(*ListSnoozesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsNotificationChannelsSendVerificationCodeCall struct { - s *Service - name string - sendnotificationchannelverificationcoderequest *SendNotificationChannelVerificationCodeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "monitoring.projects.snoozes.patch": + +type ProjectsSnoozesPatchCall struct { + s *Service + name string + snooze *Snooze + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SendVerificationCode: Causes a verification code to be delivered to -// the channel. The code can then be supplied in -// VerifyNotificationChannel to verify the channel. -func (r *ProjectsNotificationChannelsService) SendVerificationCode(name string, sendnotificationchannelverificationcoderequest *SendNotificationChannelVerificationCodeRequest) *ProjectsNotificationChannelsSendVerificationCodeCall { - c := &ProjectsNotificationChannelsSendVerificationCodeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a Snooze, identified by its name, with the parameters +// in the given Snooze object. +// +// - name: The name of the Snooze. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] The ID of the +// Snooze will be generated by the system. +func (r *ProjectsSnoozesService) Patch(name string, snooze *Snooze) *ProjectsSnoozesPatchCall { + c := &ProjectsSnoozesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.sendnotificationchannelverificationcoderequest = sendnotificationchannelverificationcoderequest + c.snooze = snooze + return c +} + +// UpdateMask sets the optional parameter "updateMask": Required. The +// fields to update.For each field listed in update_mask: If the Snooze +// object supplied in the UpdateSnoozeRequest has a value for that +// field, the value of the field in the existing Snooze will be set to +// the value of the field in the supplied Snooze. 
If the field does not +// have a value in the supplied Snooze, the field in the existing Snooze +// is set to its default value.Fields not listed retain their existing +// value.The following are the field names that are accepted in +// update_mask: display_name interval.start_time interval.end_timeThat +// said, the start time and end time of the Snooze determines which +// fields can legally be updated. Before attempting an update, users +// should consult the documentation for UpdateSnoozeRequest, which talks +// about which fields can be updated. +func (c *ProjectsSnoozesPatchCall) UpdateMask(updateMask string) *ProjectsSnoozesPatchCall { + c.urlParams_.Set("updateMask", updateMask) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsNotificationChannelsSendVerificationCodeCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsSendVerificationCodeCall { +func (c *ProjectsSnoozesPatchCall) Fields(s ...googleapi.Field) *ProjectsSnoozesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9977,38 +14031,38 @@ func (c *ProjectsNotificationChannelsSendVerificationCodeCall) Fields(s ...googl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsNotificationChannelsSendVerificationCodeCall) Context(ctx context.Context) *ProjectsNotificationChannelsSendVerificationCodeCall { +func (c *ProjectsSnoozesPatchCall) Context(ctx context.Context) *ProjectsSnoozesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsNotificationChannelsSendVerificationCodeCall) Header() http.Header { +func (c *ProjectsSnoozesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsNotificationChannelsSendVerificationCodeCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSnoozesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.sendnotificationchannelverificationcoderequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snooze) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}:sendVerificationCode") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -10019,33 +14073,33 @@ func (c *ProjectsNotificationChannelsSendVerificationCodeCall) doRequest(alt str return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.notificationChannels.sendVerificationCode" call. -// Exactly one of *Empty or error will be non-nil. 
Any non-2xx status +// Do executes the "monitoring.projects.snoozes.patch" call. +// Exactly one of *Snooze or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Empty.ServerResponse.Header or (if a response was returned at all) +// *Snooze.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ProjectsNotificationChannelsSendVerificationCodeCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +func (c *ProjectsSnoozesPatchCall) Do(opts ...googleapi.CallOption) (*Snooze, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &Empty{ + ret := &Snooze{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -10057,28 +14111,34 @@ func (c *ProjectsNotificationChannelsSendVerificationCodeCall) Do(opts ...google } return ret, nil // { - // "description": "Causes a verification code to be delivered to the channel. The code can then be supplied in VerifyNotificationChannel to verify the channel.", - // "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}:sendVerificationCode", - // "httpMethod": "POST", - // "id": "monitoring.projects.notificationChannels.sendVerificationCode", + // "description": "Updates a Snooze, identified by its name, with the parameters in the given Snooze object.", + // "flatPath": "v3/projects/{projectsId}/snoozes/{snoozesId}", + // "httpMethod": "PATCH", + // "id": "monitoring.projects.snoozes.patch", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The notification channel to which to send a verification code.", + // "description": "Required. The name of the Snooze. The format is: projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID] The ID of the Snooze will be generated by the system.", // "location": "path", - // "pattern": "^projects/[^/]+/notificationChannels/[^/]+$", + // "pattern": "^projects/[^/]+/snoozes/[^/]+$", // "required": true, // "type": "string" + // }, + // "updateMask": { + // "description": "Required. The fields to update.For each field listed in update_mask: If the Snooze object supplied in the UpdateSnoozeRequest has a value for that field, the value of the field in the existing Snooze will be set to the value of the field in the supplied Snooze. If the field does not have a value in the supplied Snooze, the field in the existing Snooze is set to its default value.Fields not listed retain their existing value.The following are the field names that are accepted in update_mask: display_name interval.start_time interval.end_timeThat said, the start time and end time of the Snooze determines which fields can legally be updated. 
Before attempting an update, users should consult the documentation for UpdateSnoozeRequest, which talks about which fields can be updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, - // "path": "v3/{+name}:sendVerificationCode", + // "path": "v3/{+name}", // "request": { - // "$ref": "SendNotificationChannelVerificationCodeRequest" + // "$ref": "Snooze" // }, // "response": { - // "$ref": "Empty" + // "$ref": "Snooze" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -10088,31 +14148,37 @@ func (c *ProjectsNotificationChannelsSendVerificationCodeCall) Do(opts ...google } -// method id "monitoring.projects.notificationChannels.verify": +// method id "monitoring.projects.timeSeries.create": -type ProjectsNotificationChannelsVerifyCall struct { - s *Service - name string - verifynotificationchannelrequest *VerifyNotificationChannelRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsTimeSeriesCreateCall struct { + s *Service + name string + createtimeseriesrequest *CreateTimeSeriesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Verify: Verifies a NotificationChannel by proving receipt of the code -// delivered to the channel as a result of calling -// SendNotificationChannelVerificationCode. -func (r *ProjectsNotificationChannelsService) Verify(name string, verifynotificationchannelrequest *VerifyNotificationChannelRequest) *ProjectsNotificationChannelsVerifyCall { - c := &ProjectsNotificationChannelsVerifyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Create: Creates or adds data to one or more time series. The response +// is empty if all time series in the request were written. If any time +// series could not be written, a corresponding failure message is +// included in the error response. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) on which +// to execute the request. The format is: +// projects/[PROJECT_ID_OR_NUMBER]. +func (r *ProjectsTimeSeriesService) Create(name string, createtimeseriesrequest *CreateTimeSeriesRequest) *ProjectsTimeSeriesCreateCall { + c := &ProjectsTimeSeriesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name - c.verifynotificationchannelrequest = verifynotificationchannelrequest + c.createtimeseriesrequest = createtimeseriesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsNotificationChannelsVerifyCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsVerifyCall { +func (c *ProjectsTimeSeriesCreateCall) Fields(s ...googleapi.Field) *ProjectsTimeSeriesCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -10120,36 +14186,36 @@ func (c *ProjectsNotificationChannelsVerifyCall) Fields(s ...googleapi.Field) *P // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsNotificationChannelsVerifyCall) Context(ctx context.Context) *ProjectsNotificationChannelsVerifyCall { +func (c *ProjectsTimeSeriesCreateCall) Context(ctx context.Context) *ProjectsTimeSeriesCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
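[Reviewer aid — not part of the vendored diff. The hunk above regenerates the projects.timeSeries.create call surface (ProjectsTimeSeriesCreateCall) that prometheus-to-sd ultimately drives. Below is a minimal sketch of how that surface is typically exercised, assuming the standard generated package layout (monitoring.NewService with Application Default Credentials, pointer-valued TypedValue fields). The project ID, metric type, and label values are hypothetical placeholders.]

package main

import (
	"context"
	"log"
	"time"

	monitoring "google.golang.org/api/monitoring/v3"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials; pass
	// option.ClientOption values to override (assumption: default auth path).
	svc, err := monitoring.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	now := time.Now().Format(time.RFC3339)
	v := 42.0 // DoubleValue is a pointer in the generated TypedValue struct
	req := &monitoring.CreateTimeSeriesRequest{
		TimeSeries: []*monitoring.TimeSeries{{
			Metric: &monitoring.Metric{Type: "custom.googleapis.com/my_metric"}, // hypothetical metric
			Resource: &monitoring.MonitoredResource{
				Type:   "global",
				Labels: map[string]string{"project_id": "my-project"}, // hypothetical project
			},
			Points: []*monitoring.Point{{
				Interval: &monitoring.TimeInterval{EndTime: now},
				Value:    &monitoring.TypedValue{DoubleValue: &v},
			}},
		}},
	}
	// Per the regenerated Do() above, the response is *Empty on success and
	// per-series write failures are surfaced through the returned error.
	if _, err := svc.Projects.TimeSeries.Create("projects/my-project", req).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}

[End reviewer aid.]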
-func (c *ProjectsNotificationChannelsVerifyCall) Header() http.Header { +func (c *ProjectsTimeSeriesCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsNotificationChannelsVerifyCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsTimeSeriesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.verifynotificationchannelrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.createtimeseriesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}:verify") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/timeSeries") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -10162,33 +14228,33 @@ func (c *ProjectsNotificationChannelsVerifyCall) doRequest(alt string) (*http.Re return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.notificationChannels.verify" call. -// Exactly one of *NotificationChannel or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *NotificationChannel.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsNotificationChannelsVerifyCall) Do(opts ...googleapi.CallOption) (*NotificationChannel, error) { +// Do executes the "monitoring.projects.timeSeries.create" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsTimeSeriesCreateCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } - ret := &NotificationChannel{ + ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -10200,40 +14266,41 @@ func (c *ProjectsNotificationChannelsVerifyCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Verifies a NotificationChannel by proving receipt of the code delivered to the channel as a result of calling SendNotificationChannelVerificationCode.", - // "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}:verify", + // "description": "Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response.", + // "flatPath": "v3/projects/{projectsId}/timeSeries", // "httpMethod": "POST", - // "id": "monitoring.projects.notificationChannels.verify", + // "id": "monitoring.projects.timeSeries.create", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The notification channel to verify.", + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] ", // "location": "path", - // "pattern": "^projects/[^/]+/notificationChannels/[^/]+$", + // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v3/{+name}:verify", + // "path": "v3/{+name}/timeSeries", // "request": { - // "$ref": "VerifyNotificationChannelRequest" + // "$ref": "CreateTimeSeriesRequest" // }, // "response": { - // "$ref": "NotificationChannel" + // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/monitoring" + // "https://www.googleapis.com/auth/monitoring", + // "https://www.googleapis.com/auth/monitoring.write" // ] // } } -// method id "monitoring.projects.timeSeries.create": +// method id "monitoring.projects.timeSeries.createService": -type ProjectsTimeSeriesCreateCall struct { +type ProjectsTimeSeriesCreateServiceCall struct { s *Service name string createtimeseriesrequest *CreateTimeSeriesRequest @@ -10242,12 +14309,21 @@ type ProjectsTimeSeriesCreateCall struct { header_ http.Header } -// Create: Creates or adds data to one or more time series. The response -// is empty if all time series in the request were written. If any time -// series could not be written, a corresponding failure message is -// included in the error response. -func (r *ProjectsTimeSeriesService) Create(name string, createtimeseriesrequest *CreateTimeSeriesRequest) *ProjectsTimeSeriesCreateCall { - c := &ProjectsTimeSeriesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// CreateService: Creates or adds data to one or more service time +// series. A service time series is a time series for a metric from a +// Google Cloud service. The response is empty if all time series in the +// request were written. 
If any time series could not be written, a +// corresponding failure message is included in the error response. This +// endpoint rejects writes to user-defined metrics. This method is only +// for use by Google Cloud services. Use projects.timeSeries.create +// instead. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) on which +// to execute the request. The format is: +// projects/[PROJECT_ID_OR_NUMBER]. +func (r *ProjectsTimeSeriesService) CreateService(name string, createtimeseriesrequest *CreateTimeSeriesRequest) *ProjectsTimeSeriesCreateServiceCall { + c := &ProjectsTimeSeriesCreateServiceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name c.createtimeseriesrequest = createtimeseriesrequest return c @@ -10256,7 +14332,7 @@ func (r *ProjectsTimeSeriesService) Create(name string, createtimeseriesrequest // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsTimeSeriesCreateCall) Fields(s ...googleapi.Field) *ProjectsTimeSeriesCreateCall { +func (c *ProjectsTimeSeriesCreateServiceCall) Fields(s ...googleapi.Field) *ProjectsTimeSeriesCreateServiceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -10264,23 +14340,23 @@ func (c *ProjectsTimeSeriesCreateCall) Fields(s ...googleapi.Field) *ProjectsTim // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsTimeSeriesCreateCall) Context(ctx context.Context) *ProjectsTimeSeriesCreateCall { +func (c *ProjectsTimeSeriesCreateServiceCall) Context(ctx context.Context) *ProjectsTimeSeriesCreateServiceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsTimeSeriesCreateCall) Header() http.Header { +func (c *ProjectsTimeSeriesCreateServiceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsTimeSeriesCreateCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsTimeSeriesCreateServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -10293,7 +14369,7 @@ func (c *ProjectsTimeSeriesCreateCall) doRequest(alt string) (*http.Response, er reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/timeSeries") + urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/timeSeries:createService") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -10306,31 +14382,31 @@ func (c *ProjectsTimeSeriesCreateCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "monitoring.projects.timeSeries.create" call. +// Do executes the "monitoring.projects.timeSeries.createService" call. // Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. 
Response headers are in either // *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ProjectsTimeSeriesCreateCall) Do(opts ...googleapi.CallOption) (*Empty, error) { +func (c *ProjectsTimeSeriesCreateServiceCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -10344,23 +14420,23 @@ func (c *ProjectsTimeSeriesCreateCall) Do(opts ...googleapi.CallOption) (*Empty, } return ret, nil // { - // "description": "Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response.", - // "flatPath": "v3/projects/{projectsId}/timeSeries", + // "description": "Creates or adds data to one or more service time series. A service time series is a time series for a metric from a Google Cloud service. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response. This endpoint rejects writes to user-defined metrics. This method is only for use by Google Cloud services. Use projects.timeSeries.create instead.", + // "flatPath": "v3/projects/{projectsId}/timeSeries:createService", // "httpMethod": "POST", - // "id": "monitoring.projects.timeSeries.create", + // "id": "monitoring.projects.timeSeries.createService", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { - // "description": "Required. The project on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] ", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, // "type": "string" // } // }, - // "path": "v3/{+name}/timeSeries", + // "path": "v3/{+name}/timeSeries:createService", // "request": { // "$ref": "CreateTimeSeriesRequest" // }, @@ -10387,8 +14463,13 @@ type ProjectsTimeSeriesListCall struct { header_ http.Header } -// List: Lists time series that match a filter. This method does not -// require a Workspace. +// List: Lists time series that match a filter. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name), +// organization or folder on which to execute the request. The format +// is: projects/[PROJECT_ID_OR_NUMBER] organizations/[ORGANIZATION_ID] +// folders/[FOLDER_ID]. 
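[Reviewer aid — not part of the vendored diff. The List doc comment just above precedes the long aggregation-parameter documentation that follows; this sketch shows how those setters (all regenerated in this hunk: AggregationAlignmentPeriod, AggregationPerSeriesAligner, AggregationCrossSeriesReducer, AggregationGroupByFields) compose on ProjectsTimeSeriesListCall. The project name, filter, and window are hypothetical; IntervalStartTime/IntervalEndTime are the generated query-parameter setters for the request interval.]

package main

import (
	"context"
	"log"
	"time"

	monitoring "google.golang.org/api/monitoring/v3"
)

func main() {
	ctx := context.Background()
	svc, err := monitoring.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	end := time.Now()
	start := end.Add(-1 * time.Hour)
	call := svc.Projects.TimeSeries.List("projects/my-project"). // hypothetical project
		Filter(`metric.type = "compute.googleapis.com/instance/cpu/usage_time"`).
		IntervalStartTime(start.Format(time.RFC3339)).
		IntervalEndTime(end.Format(time.RFC3339)).
		AggregationAlignmentPeriod("300s").           // must be >= 60s, per the doc below
		AggregationPerSeriesAligner("ALIGN_RATE").    // required when a reducer is set
		AggregationCrossSeriesReducer("REDUCE_MEAN").
		AggregationGroupByFields("resource.labels.zone")
	// Pages iterates via nextPageToken, mirroring the Pages helper generated
	// for ProjectsSnoozesListCall earlier in this diff.
	if err := call.Pages(ctx, func(page *monitoring.ListTimeSeriesResponse) error {
		for _, ts := range page.TimeSeries {
			log.Printf("%s: %d points", ts.Metric.Type, len(ts.Points))
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}

[End reviewer aid.]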
func (r *ProjectsTimeSeriesService) List(name string) *ProjectsTimeSeriesListCall { c := &ProjectsTimeSeriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -10400,11 +14481,12 @@ func (r *ProjectsTimeSeriesService) List(name string) *ProjectsTimeSeriesListCal // interval, in seconds, that is used to divide the data in all the time // series into consistent blocks of time. This will be done before the // per-series aligner can be applied to the data.The value must be at -// least 60 seconds, at most 104 weeks. If a per-series aligner other -// than ALIGN_NONE is specified, this field is required or an error is -// returned. If no per-series aligner is specified, or the aligner -// ALIGN_NONE is specified, then this field is ignored.The maximum value -// of the alignment_period is 2 years, or 104 weeks. +// least 60 seconds. If a per-series aligner other than ALIGN_NONE is +// specified, this field is required or an error is returned. If no +// per-series aligner is specified, or the aligner ALIGN_NONE is +// specified, then this field is ignored.The maximum value of the +// alignment_period is 104 weeks (2 years) for charts, and 90,000 +// seconds (25 hours) for alerting policies. func (c *ProjectsTimeSeriesListCall) AggregationAlignmentPeriod(aggregationAlignmentPeriod string) *ProjectsTimeSeriesListCall { c.urlParams_.Set("aggregation.alignmentPeriod", aggregationAlignmentPeriod) return c @@ -10426,20 +14508,93 @@ func (c *ProjectsTimeSeriesListCall) AggregationAlignmentPeriod(aggregationAlign // error is returned. // // Possible values: -// "REDUCE_NONE" -// "REDUCE_MEAN" -// "REDUCE_MIN" -// "REDUCE_MAX" -// "REDUCE_SUM" -// "REDUCE_STDDEV" -// "REDUCE_COUNT" -// "REDUCE_COUNT_TRUE" -// "REDUCE_COUNT_FALSE" -// "REDUCE_FRACTION_TRUE" -// "REDUCE_PERCENTILE_99" -// "REDUCE_PERCENTILE_95" -// "REDUCE_PERCENTILE_50" -// "REDUCE_PERCENTILE_05" +// +// "REDUCE_NONE" - No cross-time series reduction. The output of the +// +// Aligner is returned. +// +// "REDUCE_MEAN" - Reduce by computing the mean value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric or distribution values. The value_type of +// the output is DOUBLE. +// +// "REDUCE_MIN" - Reduce by computing the minimum value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric values. The value_type of the output is +// the same as the value_type of the input. +// +// "REDUCE_MAX" - Reduce by computing the maximum value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric values. The value_type of the output is +// the same as the value_type of the input. +// +// "REDUCE_SUM" - Reduce by computing the sum across time series for +// +// each alignment period. This reducer is valid for DELTA and GAUGE +// metrics with numeric and distribution values. The value_type of the +// output is the same as the value_type of the input. +// +// "REDUCE_STDDEV" - Reduce by computing the standard deviation across +// +// time series for each alignment period. This reducer is valid for +// DELTA and GAUGE metrics with numeric or distribution values. The +// value_type of the output is DOUBLE. +// +// "REDUCE_COUNT" - Reduce by computing the number of data points +// +// across time series for each alignment period. 
This reducer is valid +// for DELTA and GAUGE metrics of numeric, Boolean, distribution, and +// string value_type. The value_type of the output is INT64. +// +// "REDUCE_COUNT_TRUE" - Reduce by computing the number of True-valued +// +// data points across time series for each alignment period. This +// reducer is valid for DELTA and GAUGE metrics of Boolean value_type. +// The value_type of the output is INT64. +// +// "REDUCE_COUNT_FALSE" - Reduce by computing the number of +// +// False-valued data points across time series for each alignment +// period. This reducer is valid for DELTA and GAUGE metrics of Boolean +// value_type. The value_type of the output is INT64. +// +// "REDUCE_FRACTION_TRUE" - Reduce by computing the ratio of the +// +// number of True-valued data points to the total number of data points +// for each alignment period. This reducer is valid for DELTA and GAUGE +// metrics of Boolean value_type. The output value is in the range 0.0, +// 1.0 and has value_type DOUBLE. +// +// "REDUCE_PERCENTILE_99" - Reduce by computing the 99th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_95" - Reduce by computing the 95th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_50" - Reduce by computing the 50th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_05" - Reduce by computing the 5th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. func (c *ProjectsTimeSeriesListCall) AggregationCrossSeriesReducer(aggregationCrossSeriesReducer string) *ProjectsTimeSeriesListCall { c.urlParams_.Set("aggregation.crossSeriesReducer", aggregationCrossSeriesReducer) return c @@ -10479,26 +14634,148 @@ func (c *ProjectsTimeSeriesListCall) AggregationGroupByFields(aggregationGroupBy // specified and not equal to ALIGN_NONE and alignment_period must be // specified; otherwise, an error is returned. // -// Possible values: -// "ALIGN_NONE" -// "ALIGN_DELTA" -// "ALIGN_RATE" -// "ALIGN_INTERPOLATE" -// "ALIGN_NEXT_OLDER" -// "ALIGN_MIN" -// "ALIGN_MAX" -// "ALIGN_MEAN" -// "ALIGN_COUNT" -// "ALIGN_SUM" -// "ALIGN_STDDEV" -// "ALIGN_COUNT_TRUE" -// "ALIGN_COUNT_FALSE" -// "ALIGN_FRACTION_TRUE" -// "ALIGN_PERCENTILE_99" -// "ALIGN_PERCENTILE_95" -// "ALIGN_PERCENTILE_50" -// "ALIGN_PERCENTILE_05" -// "ALIGN_PERCENT_CHANGE" +// Possible values: +// +// "ALIGN_NONE" - No alignment. Raw data is returned. Not valid if +// +// cross-series reduction is requested. The value_type of the result is +// the same as the value_type of the input. +// +// "ALIGN_DELTA" - Align and convert to DELTA. The output is delta = +// +// y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. 
If +// the selected alignment period results in periods with no data, then +// the aligned value for such a period is created by interpolation. The +// value_type of the aligned result is the same as the value_type of the +// input. +// +// "ALIGN_RATE" - Align and convert to a rate. The result is computed +// +// as rate = (y1 - y0)/(t1 - t0), or "delta over time". Think of this +// aligner as providing the slope of the line that passes through the +// value at the start and at the end of the alignment_period.This +// aligner is valid for CUMULATIVE and DELTA metrics with numeric +// values. If the selected alignment period results in periods with no +// data, then the aligned value for such a period is created by +// interpolation. The output is a GAUGE metric with value_type +// DOUBLE.If, by "rate", you mean "percentage change", see the +// ALIGN_PERCENT_CHANGE aligner instead. +// +// "ALIGN_INTERPOLATE" - Align by interpolating between adjacent +// +// points around the alignment period boundary. This aligner is valid +// for GAUGE metrics with numeric values. The value_type of the aligned +// result is the same as the value_type of the input. +// +// "ALIGN_NEXT_OLDER" - Align by moving the most recent data point +// +// before the end of the alignment period to the boundary at the end of +// the alignment period. This aligner is valid for GAUGE metrics. The +// value_type of the aligned result is the same as the value_type of the +// input. +// +// "ALIGN_MIN" - Align the time series by returning the minimum value +// +// in each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// the same as the value_type of the input. +// +// "ALIGN_MAX" - Align the time series by returning the maximum value +// +// in each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// the same as the value_type of the input. +// +// "ALIGN_MEAN" - Align the time series by returning the mean value in +// +// each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// DOUBLE. +// +// "ALIGN_COUNT" - Align the time series by returning the number of +// +// values in each alignment period. This aligner is valid for GAUGE and +// DELTA metrics with numeric or Boolean values. The value_type of the +// aligned result is INT64. +// +// "ALIGN_SUM" - Align the time series by returning the sum of the +// +// values in each alignment period. This aligner is valid for GAUGE and +// DELTA metrics with numeric and distribution values. The value_type of +// the aligned result is the same as the value_type of the input. +// +// "ALIGN_STDDEV" - Align the time series by returning the standard +// +// deviation of the values in each alignment period. This aligner is +// valid for GAUGE and DELTA metrics with numeric values. The value_type +// of the output is DOUBLE. +// +// "ALIGN_COUNT_TRUE" - Align the time series by returning the number +// +// of True values in each alignment period. This aligner is valid for +// GAUGE metrics with Boolean values. The value_type of the output is +// INT64. +// +// "ALIGN_COUNT_FALSE" - Align the time series by returning the number +// +// of False values in each alignment period. This aligner is valid for +// GAUGE metrics with Boolean values. The value_type of the output is +// INT64. 
+// +// "ALIGN_FRACTION_TRUE" - Align the time series by returning the +// +// ratio of the number of True values to the total number of values in +// each alignment period. This aligner is valid for GAUGE metrics with +// Boolean values. The output value is in the range 0.0, 1.0 and has +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_99" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 99th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_95" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 95th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_50" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 50th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_05" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 5th percentile of all data +// points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENT_CHANGE" - Align and convert to a percentage change. +// +// This aligner is valid for GAUGE and DELTA metrics with numeric +// values. This alignment returns ((current - previous)/previous) * 100, +// where the value of previous is determined based on the +// alignment_period.If the values of current and previous are both 0, +// then the returned value is 0. If only previous is 0, the returned +// value is infinity.A 10-minute moving mean is computed at each point +// of the alignment period prior to the above calculation to smooth the +// metric and prevent false positives from very short-lived spikes. The +// moving mean is only applicable for data whose values are >= 0. Any +// values < 0 are treated as a missing datapoint, and are ignored. While +// DELTA metrics are accepted by this alignment, special care should be +// taken that the values for the metric will always be positive. The +// output is a GAUGE metric with value_type DOUBLE. func (c *ProjectsTimeSeriesListCall) AggregationPerSeriesAligner(aggregationPerSeriesAligner string) *ProjectsTimeSeriesListCall { c.urlParams_.Set("aggregation.perSeriesAligner", aggregationPerSeriesAligner) return c @@ -10508,9 +14785,9 @@ func (c *ProjectsTimeSeriesListCall) AggregationPerSeriesAligner(aggregationPerS // filter (https://cloud.google.com/monitoring/api/v3/filters) that // specifies which time series should be returned. The filter must // specify a single metric type, and can additionally specify metric -// labels and other information. 
For example: -// metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND -// metric.labels.instance_name = "my-instance-name" +// labels and other information. For example: metric.type = +// "compute.googleapis.com/instance/cpu/usage_time" AND +// metric.labels.instance_name = "my-instance-name" func (c *ProjectsTimeSeriesListCall) Filter(filter string) *ProjectsTimeSeriesListCall { c.urlParams_.Set("filter", filter) return c @@ -10560,12 +14837,323 @@ func (c *ProjectsTimeSeriesListCall) PageToken(pageToken string) *ProjectsTimeSe return c } +// SecondaryAggregationAlignmentPeriod sets the optional parameter +// "secondaryAggregation.alignmentPeriod": The alignment_period +// specifies a time interval, in seconds, that is used to divide the +// data in all the time series into consistent blocks of time. This will +// be done before the per-series aligner can be applied to the data.The +// value must be at least 60 seconds. If a per-series aligner other than +// ALIGN_NONE is specified, this field is required or an error is +// returned. If no per-series aligner is specified, or the aligner +// ALIGN_NONE is specified, then this field is ignored.The maximum value +// of the alignment_period is 104 weeks (2 years) for charts, and 90,000 +// seconds (25 hours) for alerting policies. +func (c *ProjectsTimeSeriesListCall) SecondaryAggregationAlignmentPeriod(secondaryAggregationAlignmentPeriod string) *ProjectsTimeSeriesListCall { + c.urlParams_.Set("secondaryAggregation.alignmentPeriod", secondaryAggregationAlignmentPeriod) + return c +} + +// SecondaryAggregationCrossSeriesReducer sets the optional parameter +// "secondaryAggregation.crossSeriesReducer": The reduction operation to +// be used to combine time series into a single time series, where the +// value of each data point in the resulting series is a function of all +// the already aligned values in the input time series.Not all reducer +// operations can be applied to all time series. The valid choices +// depend on the metric_kind and the value_type of the original time +// series. Reduction can yield a time series with a different +// metric_kind or value_type than the input time series.Time series data +// must first be aligned (see per_series_aligner) in order to perform +// cross-time series reduction. If cross_series_reducer is specified, +// then per_series_aligner must be specified, and must not be +// ALIGN_NONE. An alignment_period must also be specified; otherwise, an +// error is returned. +// +// Possible values: +// +// "REDUCE_NONE" - No cross-time series reduction. The output of the +// +// Aligner is returned. +// +// "REDUCE_MEAN" - Reduce by computing the mean value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric or distribution values. The value_type of +// the output is DOUBLE. +// +// "REDUCE_MIN" - Reduce by computing the minimum value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric values. The value_type of the output is +// the same as the value_type of the input. +// +// "REDUCE_MAX" - Reduce by computing the maximum value across time +// +// series for each alignment period. This reducer is valid for DELTA and +// GAUGE metrics with numeric values. The value_type of the output is +// the same as the value_type of the input. +// +// "REDUCE_SUM" - Reduce by computing the sum across time series for +// +// each alignment period. 
This reducer is valid for DELTA and GAUGE +// metrics with numeric and distribution values. The value_type of the +// output is the same as the value_type of the input. +// +// "REDUCE_STDDEV" - Reduce by computing the standard deviation across +// +// time series for each alignment period. This reducer is valid for +// DELTA and GAUGE metrics with numeric or distribution values. The +// value_type of the output is DOUBLE. +// +// "REDUCE_COUNT" - Reduce by computing the number of data points +// +// across time series for each alignment period. This reducer is valid +// for DELTA and GAUGE metrics of numeric, Boolean, distribution, and +// string value_type. The value_type of the output is INT64. +// +// "REDUCE_COUNT_TRUE" - Reduce by computing the number of True-valued +// +// data points across time series for each alignment period. This +// reducer is valid for DELTA and GAUGE metrics of Boolean value_type. +// The value_type of the output is INT64. +// +// "REDUCE_COUNT_FALSE" - Reduce by computing the number of +// +// False-valued data points across time series for each alignment +// period. This reducer is valid for DELTA and GAUGE metrics of Boolean +// value_type. The value_type of the output is INT64. +// +// "REDUCE_FRACTION_TRUE" - Reduce by computing the ratio of the +// +// number of True-valued data points to the total number of data points +// for each alignment period. This reducer is valid for DELTA and GAUGE +// metrics of Boolean value_type. The output value is in the range 0.0, +// 1.0 and has value_type DOUBLE. +// +// "REDUCE_PERCENTILE_99" - Reduce by computing the 99th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_95" - Reduce by computing the 95th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_50" - Reduce by computing the 50th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +// +// "REDUCE_PERCENTILE_05" - Reduce by computing the 5th percentile +// +// (https://en.wikipedia.org/wiki/Percentile) of data points across time +// series for each alignment period. This reducer is valid for GAUGE and +// DELTA metrics of numeric and distribution type. The value of the +// output is DOUBLE. +func (c *ProjectsTimeSeriesListCall) SecondaryAggregationCrossSeriesReducer(secondaryAggregationCrossSeriesReducer string) *ProjectsTimeSeriesListCall { + c.urlParams_.Set("secondaryAggregation.crossSeriesReducer", secondaryAggregationCrossSeriesReducer) + return c +} + +// SecondaryAggregationGroupByFields sets the optional parameter +// "secondaryAggregation.groupByFields": The set of fields to preserve +// when cross_series_reducer is specified. The group_by_fields determine +// how the time series are partitioned into subsets prior to applying +// the aggregation operation. Each subset contains time series that have +// the same value for each of the grouping fields. 
Each individual time +// series is a member of exactly one subset. The cross_series_reducer is +// applied to each subset of time series. It is not possible to reduce +// across different resource types, so this field implicitly contains +// resource.type. Fields not specified in group_by_fields are aggregated +// away. If group_by_fields is not specified and all the time series +// have the same resource type, then the time series are aggregated into +// a single output time series. If cross_series_reducer is not defined, +// this field is ignored. +func (c *ProjectsTimeSeriesListCall) SecondaryAggregationGroupByFields(secondaryAggregationGroupByFields ...string) *ProjectsTimeSeriesListCall { + c.urlParams_.SetMulti("secondaryAggregation.groupByFields", append([]string{}, secondaryAggregationGroupByFields...)) + return c +} + +// SecondaryAggregationPerSeriesAligner sets the optional parameter +// "secondaryAggregation.perSeriesAligner": An Aligner describes how to +// bring the data points in a single time series into temporal +// alignment. Except for ALIGN_NONE, all alignments cause all the data +// points in an alignment_period to be mathematically grouped together, +// resulting in a single data point for each alignment_period with end +// timestamp at the end of the period.Not all alignment operations may +// be applied to all time series. The valid choices depend on the +// metric_kind and value_type of the original time series. Alignment can +// change the metric_kind or the value_type of the time series.Time +// series data must be aligned in order to perform cross-time series +// reduction. If cross_series_reducer is specified, then +// per_series_aligner must be specified and not equal to ALIGN_NONE and +// alignment_period must be specified; otherwise, an error is returned. +// +// Possible values: +// +// "ALIGN_NONE" - No alignment. Raw data is returned. Not valid if +// +// cross-series reduction is requested. The value_type of the result is +// the same as the value_type of the input. +// +// "ALIGN_DELTA" - Align and convert to DELTA. The output is delta = +// +// y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If +// the selected alignment period results in periods with no data, then +// the aligned value for such a period is created by interpolation. The +// value_type of the aligned result is the same as the value_type of the +// input. +// +// "ALIGN_RATE" - Align and convert to a rate. The result is computed +// +// as rate = (y1 - y0)/(t1 - t0), or "delta over time". Think of this +// aligner as providing the slope of the line that passes through the +// value at the start and at the end of the alignment_period.This +// aligner is valid for CUMULATIVE and DELTA metrics with numeric +// values. If the selected alignment period results in periods with no +// data, then the aligned value for such a period is created by +// interpolation. The output is a GAUGE metric with value_type +// DOUBLE.If, by "rate", you mean "percentage change", see the +// ALIGN_PERCENT_CHANGE aligner instead. +// +// "ALIGN_INTERPOLATE" - Align by interpolating between adjacent +// +// points around the alignment period boundary. This aligner is valid +// for GAUGE metrics with numeric values. The value_type of the aligned +// result is the same as the value_type of the input. +// +// "ALIGN_NEXT_OLDER" - Align by moving the most recent data point +// +// before the end of the alignment period to the boundary at the end of +// the alignment period. 
This aligner is valid for GAUGE metrics. The +// value_type of the aligned result is the same as the value_type of the +// input. +// +// "ALIGN_MIN" - Align the time series by returning the minimum value +// +// in each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// the same as the value_type of the input. +// +// "ALIGN_MAX" - Align the time series by returning the maximum value +// +// in each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// the same as the value_type of the input. +// +// "ALIGN_MEAN" - Align the time series by returning the mean value in +// +// each alignment period. This aligner is valid for GAUGE and DELTA +// metrics with numeric values. The value_type of the aligned result is +// DOUBLE. +// +// "ALIGN_COUNT" - Align the time series by returning the number of +// +// values in each alignment period. This aligner is valid for GAUGE and +// DELTA metrics with numeric or Boolean values. The value_type of the +// aligned result is INT64. +// +// "ALIGN_SUM" - Align the time series by returning the sum of the +// +// values in each alignment period. This aligner is valid for GAUGE and +// DELTA metrics with numeric and distribution values. The value_type of +// the aligned result is the same as the value_type of the input. +// +// "ALIGN_STDDEV" - Align the time series by returning the standard +// +// deviation of the values in each alignment period. This aligner is +// valid for GAUGE and DELTA metrics with numeric values. The value_type +// of the output is DOUBLE. +// +// "ALIGN_COUNT_TRUE" - Align the time series by returning the number +// +// of True values in each alignment period. This aligner is valid for +// GAUGE metrics with Boolean values. The value_type of the output is +// INT64. +// +// "ALIGN_COUNT_FALSE" - Align the time series by returning the number +// +// of False values in each alignment period. This aligner is valid for +// GAUGE metrics with Boolean values. The value_type of the output is +// INT64. +// +// "ALIGN_FRACTION_TRUE" - Align the time series by returning the +// +// ratio of the number of True values to the total number of values in +// each alignment period. This aligner is valid for GAUGE metrics with +// Boolean values. The output value is in the range 0.0, 1.0 and has +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_99" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 99th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_95" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 95th percentile of all +// data points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_50" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 50th percentile of all +// data points in the period. 
This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENTILE_05" - Align the time series by using percentile +// +// aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting +// data point in each alignment period is the 5th percentile of all data +// points in the period. This aligner is valid for GAUGE and DELTA +// metrics with distribution values. The output is a GAUGE metric with +// value_type DOUBLE. +// +// "ALIGN_PERCENT_CHANGE" - Align and convert to a percentage change. +// +// This aligner is valid for GAUGE and DELTA metrics with numeric +// values. This alignment returns ((current - previous)/previous) * 100, +// where the value of previous is determined based on the +// alignment_period.If the values of current and previous are both 0, +// then the returned value is 0. If only previous is 0, the returned +// value is infinity.A 10-minute moving mean is computed at each point +// of the alignment period prior to the above calculation to smooth the +// metric and prevent false positives from very short-lived spikes. The +// moving mean is only applicable for data whose values are >= 0. Any +// values < 0 are treated as a missing datapoint, and are ignored. While +// DELTA metrics are accepted by this alignment, special care should be +// taken that the values for the metric will always be positive. The +// output is a GAUGE metric with value_type DOUBLE. +func (c *ProjectsTimeSeriesListCall) SecondaryAggregationPerSeriesAligner(secondaryAggregationPerSeriesAligner string) *ProjectsTimeSeriesListCall { + c.urlParams_.Set("secondaryAggregation.perSeriesAligner", secondaryAggregationPerSeriesAligner) + return c +} + // View sets the optional parameter "view": Required. Specifies which // information is returned about the time series. // // Possible values: -// "FULL" -// "HEADERS" +// +// "FULL" - Returns the identity of the metric(s), the time series, +// +// and the time series data. +// +// "HEADERS" - Returns the identity of the metric and the time series +// +// resource, but not the time series data. func (c *ProjectsTimeSeriesListCall) View(view string) *ProjectsTimeSeriesListCall { c.urlParams_.Set("view", view) return c @@ -10608,7 +15196,7 @@ func (c *ProjectsTimeSeriesListCall) Header() http.Header { func (c *ProjectsTimeSeriesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -10646,17 +15234,17 @@ func (c *ProjectsTimeSeriesListCall) Do(opts ...googleapi.CallOption) (*ListTime if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListTimeSeriesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -10670,7 +15258,7 @@ func (c *ProjectsTimeSeriesListCall) Do(opts ...googleapi.CallOption) (*ListTime } return ret, nil // { - // "description": "Lists time series that match a filter. 
This method does not require a Workspace.", + // "description": "Lists time series that match a filter.", // "flatPath": "v3/projects/{projectsId}/timeSeries", // "httpMethod": "GET", // "id": "monitoring.projects.timeSeries.list", @@ -10679,7 +15267,7 @@ func (c *ProjectsTimeSeriesListCall) Do(opts ...googleapi.CallOption) (*ListTime // ], // "parameters": { // "aggregation.alignmentPeriod": { - // "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds, at most 104 weeks. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 2 years, or 104 weeks.", + // "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for alerting policies.", // "format": "google-duration", // "location": "query", // "type": "string" @@ -10702,6 +15290,22 @@ func (c *ProjectsTimeSeriesListCall) Do(opts ...googleapi.CallOption) (*ListTime // "REDUCE_PERCENTILE_50", // "REDUCE_PERCENTILE_05" // ], + // "enumDescriptions": [ + // "No cross-time series reduction. The output of the Aligner is returned.", + // "Reduce by computing the mean value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + // "Reduce by computing the minimum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the maximum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the sum across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric and distribution values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the standard deviation across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + // "Reduce by computing the number of data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of numeric, Boolean, distribution, and string value_type. The value_type of the output is INT64.", + // "Reduce by computing the number of True-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. 
The value_type of the output is INT64.", + // "Reduce by computing the number of False-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + // "Reduce by computing the ratio of the number of True-valued data points to the total number of data points for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + // "Reduce by computing the 99th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 95th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 50th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 5th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE." + // ], // "location": "query", // "type": "string" // }, @@ -10734,11 +15338,32 @@ func (c *ProjectsTimeSeriesListCall) Do(opts ...googleapi.CallOption) (*ListTime // "ALIGN_PERCENTILE_05", // "ALIGN_PERCENT_CHANGE" // ], + // "enumDescriptions": [ + // "No alignment. Raw data is returned. Not valid if cross-series reduction is requested. The value_type of the result is the same as the value_type of the input.", + // "Align and convert to DELTA. The output is delta = y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The value_type of the aligned result is the same as the value_type of the input.", + // "Align and convert to a rate. The result is computed as rate = (y1 - y0)/(t1 - t0), or \"delta over time\". Think of this aligner as providing the slope of the line that passes through the value at the start and at the end of the alignment_period.This aligner is valid for CUMULATIVE and DELTA metrics with numeric values. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The output is a GAUGE metric with value_type DOUBLE.If, by \"rate\", you mean \"percentage change\", see the ALIGN_PERCENT_CHANGE aligner instead.", + // "Align by interpolating between adjacent points around the alignment period boundary. This aligner is valid for GAUGE metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align by moving the most recent data point before the end of the alignment period to the boundary at the end of the alignment period. This aligner is valid for GAUGE metrics. 
The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the minimum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the maximum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the mean value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is DOUBLE.", + // "Align the time series by returning the number of values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric or Boolean values. The value_type of the aligned result is INT64.", + // "Align the time series by returning the sum of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric and distribution values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the standard deviation of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the output is DOUBLE.", + // "Align the time series by returning the number of True values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + // "Align the time series by returning the number of False values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + // "Align the time series by returning the ratio of the number of True values to the total number of values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 99th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 95th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 50th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 5th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align and convert to a percentage change. 
This aligner is valid for GAUGE and DELTA metrics with numeric values. This alignment returns ((current - previous)/previous) * 100, where the value of previous is determined based on the alignment_period.If the values of current and previous are both 0, then the returned value is 0. If only previous is 0, the returned value is infinity.A 10-minute moving mean is computed at each point of the alignment period prior to the above calculation to smooth the metric and prevent false positives from very short-lived spikes. The moving mean is only applicable for data whose values are \u003e= 0. Any values \u003c 0 are treated as a missing datapoint, and are ignored. While DELTA metrics are accepted by this alignment, special care should be taken that the values for the metric will always be positive. The output is a GAUGE metric with value_type DOUBLE." + // ], // "location": "query", // "type": "string" // }, // "filter": { - // "description": "Required. A monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that specifies which time series should be returned. The filter must specify a single metric type, and can additionally specify metric labels and other information. For example:\nmetric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND\n metric.labels.instance_name = \"my-instance-name\"\n", + // "description": "Required. A monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) that specifies which time series should be returned. The filter must specify a single metric type, and can additionally specify metric labels and other information. For example: metric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND metric.labels.instance_name = \"my-instance-name\" ", // "location": "query", // "type": "string" // }, @@ -10755,7 +15380,7 @@ func (c *ProjectsTimeSeriesListCall) Do(opts ...googleapi.CallOption) (*ListTime // "type": "string" // }, // "name": { - // "description": "Required. The project on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name), organization or folder on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] organizations/[ORGANIZATION_ID] folders/[FOLDER_ID] ", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -10777,12 +15402,112 @@ func (c *ProjectsTimeSeriesListCall) Do(opts ...googleapi.CallOption) (*ListTime // "location": "query", // "type": "string" // }, + // "secondaryAggregation.alignmentPeriod": { + // "description": "The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. 
If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for alerting policies.", + // "format": "google-duration", + // "location": "query", + // "type": "string" + // }, + // "secondaryAggregation.crossSeriesReducer": { + // "description": "The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned.", + // "enum": [ + // "REDUCE_NONE", + // "REDUCE_MEAN", + // "REDUCE_MIN", + // "REDUCE_MAX", + // "REDUCE_SUM", + // "REDUCE_STDDEV", + // "REDUCE_COUNT", + // "REDUCE_COUNT_TRUE", + // "REDUCE_COUNT_FALSE", + // "REDUCE_FRACTION_TRUE", + // "REDUCE_PERCENTILE_99", + // "REDUCE_PERCENTILE_95", + // "REDUCE_PERCENTILE_50", + // "REDUCE_PERCENTILE_05" + // ], + // "enumDescriptions": [ + // "No cross-time series reduction. The output of the Aligner is returned.", + // "Reduce by computing the mean value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + // "Reduce by computing the minimum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the maximum value across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the sum across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric and distribution values. The value_type of the output is the same as the value_type of the input.", + // "Reduce by computing the standard deviation across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics with numeric or distribution values. The value_type of the output is DOUBLE.", + // "Reduce by computing the number of data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of numeric, Boolean, distribution, and string value_type. The value_type of the output is INT64.", + // "Reduce by computing the number of True-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The value_type of the output is INT64.", + // "Reduce by computing the number of False-valued data points across time series for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. 
The value_type of the output is INT64.", + // "Reduce by computing the ratio of the number of True-valued data points to the total number of data points for each alignment period. This reducer is valid for DELTA and GAUGE metrics of Boolean value_type. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + // "Reduce by computing the 99th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 95th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 50th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE.", + // "Reduce by computing the 5th percentile (https://en.wikipedia.org/wiki/Percentile) of data points across time series for each alignment period. This reducer is valid for GAUGE and DELTA metrics of numeric and distribution type. The value of the output is DOUBLE." + // ], + // "location": "query", + // "type": "string" + // }, + // "secondaryAggregation.groupByFields": { + // "description": "The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored.", + // "location": "query", + // "repeated": true, + // "type": "string" + // }, + // "secondaryAggregation.perSeriesAligner": { + // "description": "An Aligner describes how to bring the data points in a single time series into temporal alignment. Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. 
If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned.", + // "enum": [ + // "ALIGN_NONE", + // "ALIGN_DELTA", + // "ALIGN_RATE", + // "ALIGN_INTERPOLATE", + // "ALIGN_NEXT_OLDER", + // "ALIGN_MIN", + // "ALIGN_MAX", + // "ALIGN_MEAN", + // "ALIGN_COUNT", + // "ALIGN_SUM", + // "ALIGN_STDDEV", + // "ALIGN_COUNT_TRUE", + // "ALIGN_COUNT_FALSE", + // "ALIGN_FRACTION_TRUE", + // "ALIGN_PERCENTILE_99", + // "ALIGN_PERCENTILE_95", + // "ALIGN_PERCENTILE_50", + // "ALIGN_PERCENTILE_05", + // "ALIGN_PERCENT_CHANGE" + // ], + // "enumDescriptions": [ + // "No alignment. Raw data is returned. Not valid if cross-series reduction is requested. The value_type of the result is the same as the value_type of the input.", + // "Align and convert to DELTA. The output is delta = y1 - y0.This alignment is valid for CUMULATIVE and DELTA metrics. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The value_type of the aligned result is the same as the value_type of the input.", + // "Align and convert to a rate. The result is computed as rate = (y1 - y0)/(t1 - t0), or \"delta over time\". Think of this aligner as providing the slope of the line that passes through the value at the start and at the end of the alignment_period.This aligner is valid for CUMULATIVE and DELTA metrics with numeric values. If the selected alignment period results in periods with no data, then the aligned value for such a period is created by interpolation. The output is a GAUGE metric with value_type DOUBLE.If, by \"rate\", you mean \"percentage change\", see the ALIGN_PERCENT_CHANGE aligner instead.", + // "Align by interpolating between adjacent points around the alignment period boundary. This aligner is valid for GAUGE metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align by moving the most recent data point before the end of the alignment period to the boundary at the end of the alignment period. This aligner is valid for GAUGE metrics. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the minimum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the maximum value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the mean value in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the aligned result is DOUBLE.", + // "Align the time series by returning the number of values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric or Boolean values. The value_type of the aligned result is INT64.", + // "Align the time series by returning the sum of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric and distribution values. 
The value_type of the aligned result is the same as the value_type of the input.", + // "Align the time series by returning the standard deviation of the values in each alignment period. This aligner is valid for GAUGE and DELTA metrics with numeric values. The value_type of the output is DOUBLE.", + // "Align the time series by returning the number of True values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + // "Align the time series by returning the number of False values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The value_type of the output is INT64.", + // "Align the time series by returning the ratio of the number of True values to the total number of values in each alignment period. This aligner is valid for GAUGE metrics with Boolean values. The output value is in the range 0.0, 1.0 and has value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 99th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 95th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 50th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align the time series by using percentile aggregation (https://en.wikipedia.org/wiki/Percentile). The resulting data point in each alignment period is the 5th percentile of all data points in the period. This aligner is valid for GAUGE and DELTA metrics with distribution values. The output is a GAUGE metric with value_type DOUBLE.", + // "Align and convert to a percentage change. This aligner is valid for GAUGE and DELTA metrics with numeric values. This alignment returns ((current - previous)/previous) * 100, where the value of previous is determined based on the alignment_period.If the values of current and previous are both 0, then the returned value is 0. If only previous is 0, the returned value is infinity.A 10-minute moving mean is computed at each point of the alignment period prior to the above calculation to smooth the metric and prevent false positives from very short-lived spikes. The moving mean is only applicable for data whose values are \u003e= 0. Any values \u003c 0 are treated as a missing datapoint, and are ignored. While DELTA metrics are accepted by this alignment, special care should be taken that the values for the metric will always be positive. The output is a GAUGE metric with value_type DOUBLE." + // ], + // "location": "query", + // "type": "string" + // }, // "view": { // "description": "Required. 
Specifies which information is returned about the time series.", // "enum": [ // "FULL", // "HEADERS" // ], + // "enumDescriptions": [ + // "Returns the identity of the metric(s), the time series, and the time series data.", + // "Returns the identity of the metric and the time series resource, but not the time series data." + // ], // "location": "query", // "type": "string" // } @@ -10832,8 +15557,12 @@ type ProjectsTimeSeriesQueryCall struct { header_ http.Header } -// Query: Queries time series using Monitoring Query Language. This -// method does not require a Workspace. +// Query: Queries time series using Monitoring Query Language. +// +// - name: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) on which +// to execute the request. The format is: +// projects/[PROJECT_ID_OR_NUMBER]. func (r *ProjectsTimeSeriesService) Query(name string, querytimeseriesrequest *QueryTimeSeriesRequest) *ProjectsTimeSeriesQueryCall { c := &ProjectsTimeSeriesQueryCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -10868,7 +15597,7 @@ func (c *ProjectsTimeSeriesQueryCall) Header() http.Header { func (c *ProjectsTimeSeriesQueryCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -10908,17 +15637,17 @@ func (c *ProjectsTimeSeriesQueryCall) Do(opts ...googleapi.CallOption) (*QueryTi if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &QueryTimeSeriesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -10932,7 +15661,7 @@ func (c *ProjectsTimeSeriesQueryCall) Do(opts ...googleapi.CallOption) (*QueryTi } return ret, nil // { - // "description": "Queries time series using Monitoring Query Language. This method does not require a Workspace.", + // "description": "Queries time series using Monitoring Query Language.", // "flatPath": "v3/projects/{projectsId}/timeSeries:query", // "httpMethod": "POST", // "id": "monitoring.projects.timeSeries.query", @@ -10941,7 +15670,7 @@ func (c *ProjectsTimeSeriesQueryCall) Do(opts ...googleapi.CallOption) (*QueryTi // ], // "parameters": { // "name": { - // "description": "Required. The project on which to execute the request. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER] ", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -10997,6 +15726,11 @@ type ProjectsUptimeCheckConfigsCreateCall struct { } // Create: Creates a new Uptime check configuration. +// +// - parent: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) in which +// to create the Uptime check. The format is: +// projects/[PROJECT_ID_OR_NUMBER]. 
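+//
+// A minimal usage sketch, not part of the generated code; ctx, the error
+// handling, and the "my-project" ID are placeholder assumptions, and a
+// real config would also set MonitoredResource:
+//
+//	svc, err := monitoring.NewService(ctx)
+//	if err != nil {
+//		// handle err
+//	}
+//	cfg := &monitoring.UptimeCheckConfig{
+//		DisplayName: "homepage-check",
+//		HttpCheck:   &monitoring.HttpCheck{Path: "/", Port: 443, UseSsl: true},
+//		Period:      "300s",
+//		Timeout:     "10s",
+//	}
+//	created, err := svc.Projects.UptimeCheckConfigs.Create("projects/my-project", cfg).Do()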
func (r *ProjectsUptimeCheckConfigsService) Create(parent string, uptimecheckconfig *UptimeCheckConfig) *ProjectsUptimeCheckConfigsCreateCall { c := &ProjectsUptimeCheckConfigsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -11031,7 +15765,7 @@ func (c *ProjectsUptimeCheckConfigsCreateCall) Header() http.Header { func (c *ProjectsUptimeCheckConfigsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -11071,17 +15805,17 @@ func (c *ProjectsUptimeCheckConfigsCreateCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UptimeCheckConfig{ ServerResponse: googleapi.ServerResponse{ @@ -11104,7 +15838,7 @@ func (c *ProjectsUptimeCheckConfigsCreateCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "parent": { - // "description": "Required. The project in which to create the Uptime check. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) in which to create the Uptime check. The format is: projects/[PROJECT_ID_OR_NUMBER] ", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -11140,6 +15874,9 @@ type ProjectsUptimeCheckConfigsDeleteCall struct { // will fail if the Uptime check configuration is referenced by an alert // policy or other dependent configs that would be rendered invalid by // the deletion. +// +// - name: The Uptime check configuration to delete. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]. func (r *ProjectsUptimeCheckConfigsService) Delete(name string) *ProjectsUptimeCheckConfigsDeleteCall { c := &ProjectsUptimeCheckConfigsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -11173,7 +15910,7 @@ func (c *ProjectsUptimeCheckConfigsDeleteCall) Header() http.Header { func (c *ProjectsUptimeCheckConfigsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -11208,17 +15945,17 @@ func (c *ProjectsUptimeCheckConfigsDeleteCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -11241,7 +15978,7 @@ func (c *ProjectsUptimeCheckConfigsDeleteCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "Required. 
The Uptime check configuration to delete. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]\n", + // "description": "Required. The Uptime check configuration to delete. The format is: projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] ", // "location": "path", // "pattern": "^projects/[^/]+/uptimeCheckConfigs/[^/]+$", // "required": true, @@ -11272,6 +16009,9 @@ type ProjectsUptimeCheckConfigsGetCall struct { } // Get: Gets a single Uptime check configuration. +// +// - name: The Uptime check configuration to retrieve. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]. func (r *ProjectsUptimeCheckConfigsService) Get(name string) *ProjectsUptimeCheckConfigsGetCall { c := &ProjectsUptimeCheckConfigsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -11315,7 +16055,7 @@ func (c *ProjectsUptimeCheckConfigsGetCall) Header() http.Header { func (c *ProjectsUptimeCheckConfigsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -11353,17 +16093,17 @@ func (c *ProjectsUptimeCheckConfigsGetCall) Do(opts ...googleapi.CallOption) (*U if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UptimeCheckConfig{ ServerResponse: googleapi.ServerResponse{ @@ -11386,7 +16126,7 @@ func (c *ProjectsUptimeCheckConfigsGetCall) Do(opts ...googleapi.CallOption) (*U // ], // "parameters": { // "name": { - // "description": "Required. The Uptime check configuration to retrieve. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]\n", + // "description": "Required. The Uptime check configuration to retrieve. The format is: projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] ", // "location": "path", // "pattern": "^projects/[^/]+/uptimeCheckConfigs/[^/]+$", // "required": true, @@ -11419,12 +16159,26 @@ type ProjectsUptimeCheckConfigsListCall struct { // List: Lists the existing valid Uptime check configurations for the // project (leaving out any invalid configurations). +// +// - parent: The project +// (https://cloud.google.com/monitoring/api/v3#project_name) whose +// Uptime check configurations are listed. The format is: +// projects/[PROJECT_ID_OR_NUMBER]. func (r *ProjectsUptimeCheckConfigsService) List(parent string) *ProjectsUptimeCheckConfigsListCall { c := &ProjectsUptimeCheckConfigsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent return c } +// Filter sets the optional parameter "filter": If provided, this field +// specifies the criteria that must be met by uptime checks to be +// included in the response.For more details, see Filtering syntax +// (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering#filter_syntax). 
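+//
+// A short sketch of the new Filter parameter; svc, the parent name, and
+// the filter expression are illustrative assumptions:
+//
+//	resp, err := svc.Projects.UptimeCheckConfigs.
+//		List("projects/my-project").
+//		Filter(`display_name = "homepage-check"`).
+//		PageSize(10).
+//		Do()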
+func (c *ProjectsUptimeCheckConfigsListCall) Filter(filter string) *ProjectsUptimeCheckConfigsListCall { + c.urlParams_.Set("filter", filter) + return c +} + // PageSize sets the optional parameter "pageSize": The maximum number // of results to return in a single response. The server may further // constrain the maximum number of results returned in a single page. If @@ -11481,7 +16235,7 @@ func (c *ProjectsUptimeCheckConfigsListCall) Header() http.Header { func (c *ProjectsUptimeCheckConfigsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -11519,17 +16273,17 @@ func (c *ProjectsUptimeCheckConfigsListCall) Do(opts ...googleapi.CallOption) (* if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListUptimeCheckConfigsResponse{ ServerResponse: googleapi.ServerResponse{ @@ -11551,6 +16305,11 @@ func (c *ProjectsUptimeCheckConfigsListCall) Do(opts ...googleapi.CallOption) (* // "parent" // ], // "parameters": { + // "filter": { + // "description": "If provided, this field specifies the criteria that must be met by uptime checks to be included in the response.For more details, see Filtering syntax (https://cloud.google.com/monitoring/api/v3/sorting-and-filtering#filter_syntax).", + // "location": "query", + // "type": "string" + // }, // "pageSize": { // "description": "The maximum number of results to return in a single response. The server may further constrain the maximum number of results returned in a single page. If the page_size is \u003c=0, the server will decide the number of results to be returned.", // "format": "int32", @@ -11563,7 +16322,7 @@ func (c *ProjectsUptimeCheckConfigsListCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "parent": { - // "description": "Required. The project whose Uptime check configurations are listed. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + // "description": "Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) whose Uptime check configurations are listed. The format is: projects/[PROJECT_ID_OR_NUMBER] ", // "location": "path", // "pattern": "^projects/[^/]+$", // "required": true, @@ -11619,6 +16378,14 @@ type ProjectsUptimeCheckConfigsPatchCall struct { // the entire configuration with a new one or replace only certain // fields in the current configuration by specifying the fields to be // updated via updateMask. Returns the updated configuration. +// +// - name: A unique resource name for this Uptime check configuration. +// The format is: +// projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] +// [PROJECT_ID_OR_NUMBER] is the Workspace host project associated +// with the Uptime check.This field should be omitted when creating +// the Uptime check configuration; on create, the resource name is +// assigned by the server and included in the response. 
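+//
+// A hedged sketch of a partial update; the resource name and the mask
+// value are assumptions, and UpdateMask limits which fields are replaced:
+//
+//	cfg := &monitoring.UptimeCheckConfig{DisplayName: "renamed-check"}
+//	updated, err := svc.Projects.UptimeCheckConfigs.
+//		Patch("projects/my-project/uptimeCheckConfigs/my-check", cfg).
+//		UpdateMask("displayName").
+//		Do()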
func (r *ProjectsUptimeCheckConfigsService) Patch(name string, uptimecheckconfig *UptimeCheckConfig) *ProjectsUptimeCheckConfigsPatchCall { c := &ProjectsUptimeCheckConfigsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -11663,7 +16430,7 @@ func (c *ProjectsUptimeCheckConfigsPatchCall) Header() http.Header { func (c *ProjectsUptimeCheckConfigsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -11703,17 +16470,17 @@ func (c *ProjectsUptimeCheckConfigsPatchCall) Do(opts ...googleapi.CallOption) ( if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &UptimeCheckConfig{ ServerResponse: googleapi.ServerResponse{ @@ -11736,7 +16503,7 @@ func (c *ProjectsUptimeCheckConfigsPatchCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "A unique resource name for this Uptime check configuration. The format is:\n projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]\nThis field should be omitted when creating the Uptime check configuration; on create, the resource name is assigned by the server and included in the response.", + // "description": "A unique resource name for this Uptime check configuration. The format is: projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID] [PROJECT_ID_OR_NUMBER] is the Workspace host project associated with the Uptime check.This field should be omitted when creating the Uptime check configuration; on create, the resource name is assigned by the server and included in the response.", // "location": "path", // "pattern": "^projects/[^/]+/uptimeCheckConfigs/[^/]+$", // "required": true, @@ -11776,6 +16543,11 @@ type ServicesCreateCall struct { } // Create: Create a Service. +// +// - parent: Resource name +// (https://cloud.google.com/monitoring/api/v3#project_name) of the +// parent Metrics Scope. The format is: +// projects/[PROJECT_ID_OR_NUMBER]. 
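+//
+// A brief sketch, assuming a svc handle as in the Uptime check examples;
+// the service ID and display name shown are illustrative, not prescribed:
+//
+//	s := &monitoring.MService{DisplayName: "checkout"}
+//	created, err := svc.Services.
+//		Create("projects/my-project", s).
+//		ServiceId("checkout-service").
+//		Do()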
func (r *ServicesService) Create(parent string, service *MService) *ServicesCreateCall { c := &ServicesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -11818,7 +16590,7 @@ func (c *ServicesCreateCall) Header() http.Header { func (c *ServicesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -11858,17 +16630,17 @@ func (c *ServicesCreateCall) Do(opts ...googleapi.CallOption) (*MService, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &MService{ ServerResponse: googleapi.ServerResponse{ @@ -11891,7 +16663,7 @@ func (c *ServicesCreateCall) Do(opts ...googleapi.CallOption) (*MService, error) // ], // "parameters": { // "parent": { - // "description": "Required. Resource name of the parent workspace. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]\n", + // "description": "Required. Resource name (https://cloud.google.com/monitoring/api/v3#project_name) of the parent Metrics Scope. The format is: projects/[PROJECT_ID_OR_NUMBER] ", // "location": "path", // "pattern": "^[^/]+/[^/]+$", // "required": true, @@ -11929,6 +16701,9 @@ type ServicesDeleteCall struct { } // Delete: Soft delete this Service. +// +// - name: Resource name of the Service to delete. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]. func (r *ServicesService) Delete(name string) *ServicesDeleteCall { c := &ServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -11962,7 +16737,7 @@ func (c *ServicesDeleteCall) Header() http.Header { func (c *ServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -11997,17 +16772,17 @@ func (c *ServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -12030,7 +16805,7 @@ func (c *ServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { // ], // "parameters": { // "name": { - // "description": "Required. Resource name of the Service to delete. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]\n", + // "description": "Required. Resource name of the Service to delete. 
The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] ", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+$", // "required": true, @@ -12061,6 +16836,9 @@ type ServicesGetCall struct { } // Get: Get the named Service. +// +// - name: Resource name of the Service. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]. func (r *ServicesService) Get(name string) *ServicesGetCall { c := &ServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -12104,7 +16882,7 @@ func (c *ServicesGetCall) Header() http.Header { func (c *ServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -12142,17 +16920,17 @@ func (c *ServicesGetCall) Do(opts ...googleapi.CallOption) (*MService, error) { if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &MService{ ServerResponse: googleapi.ServerResponse{ @@ -12175,7 +16953,7 @@ func (c *ServicesGetCall) Do(opts ...googleapi.CallOption) (*MService, error) { // ], // "parameters": { // "name": { - // "description": "Required. Resource name of the Service. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]\n", + // "description": "Required. Resource name of the Service. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] ", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+$", // "required": true, @@ -12206,7 +16984,14 @@ type ServicesListCall struct { header_ http.Header } -// List: List Services for this workspace. +// List: List Services for this Metrics Scope. +// +// - parent: Resource name of the parent containing the listed services, +// either a project +// (https://cloud.google.com/monitoring/api/v3#project_name) or a +// Monitoring Metrics Scope. The formats are: +// projects/[PROJECT_ID_OR_NUMBER] +// workspaces/[HOST_PROJECT_ID_OR_NUMBER]. func (r *ServicesService) List(parent string) *ServicesListCall { c := &ServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -12214,23 +16999,22 @@ func (r *ServicesService) List(parent string) *ServicesListCall { } // Filter sets the optional parameter "filter": A filter specifying what -// Services to return. The filter currently supports the following -// fields: -// - `identifier_case` -// - `app_engine.module_id` -// - `cloud_endpoints.service` -// - `mesh_istio.mesh_uid` -// - `mesh_istio.service_namespace` -// - `mesh_istio.service_name` -// - `cluster_istio.location` (deprecated) -// - `cluster_istio.cluster_name` (deprecated) -// - `cluster_istio.service_namespace` (deprecated) -// - `cluster_istio.service_name` (deprecated) -// identifier_case refers to which option in the identifier oneof is -// populated. For example, the filter identifier_case = "CUSTOM" would -// match all services with a value for the custom field. Valid options -// are "CUSTOM", "APP_ENGINE", "CLOUD_ENDPOINTS", "MESH_ISTIO", and -// "CLUSTER_ISTIO" (deprecated), +// Services to return. 
The filter supports filtering on a particular +// service-identifier type or one of its attributes.To filter on a +// particular service-identifier type, the identifier_case refers to +// which option in the identifier field is populated. For example, the +// filter identifier_case = "CUSTOM" would match all services with a +// value for the custom field. Valid options include "CUSTOM", +// "APP_ENGINE", "MESH_ISTIO", and the other options listed at +// https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#ServiceTo +// filter on an attribute of a service-identifier type, apply the filter +// name by using the snake case of the service-identifier type and the +// attribute of that service-identifier type, and join the two with a +// period. For example, to filter by the meshUid field of the MeshIstio +// service-identifier type, you must filter on mesh_istio.mesh_uid = +// "123" to match all services with mesh UID "123". Service-identifier +// types and their attributes are described at +// https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service func (c *ServicesListCall) Filter(filter string) *ServicesListCall { c.urlParams_.Set("filter", filter) return c @@ -12290,7 +17074,7 @@ func (c *ServicesListCall) Header() http.Header { func (c *ServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -12328,17 +17112,17 @@ func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesRespon if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListServicesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -12352,7 +17136,7 @@ func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesRespon } return ret, nil // { - // "description": "List Services for this workspace.", + // "description": "List Services for this Metrics Scope.", // "flatPath": "v3/{v3Id}/{v3Id1}/services", // "httpMethod": "GET", // "id": "monitoring.services.list", @@ -12361,7 +17145,7 @@ func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesRespon // ], // "parameters": { // "filter": { - // "description": "A filter specifying what Services to return. The filter currently supports the following fields:\n- `identifier_case`\n- `app_engine.module_id`\n- `cloud_endpoints.service`\n- `mesh_istio.mesh_uid`\n- `mesh_istio.service_namespace`\n- `mesh_istio.service_name`\n- `cluster_istio.location` (deprecated)\n- `cluster_istio.cluster_name` (deprecated)\n- `cluster_istio.service_namespace` (deprecated)\n- `cluster_istio.service_name` (deprecated)\nidentifier_case refers to which option in the identifier oneof is populated. For example, the filter identifier_case = \"CUSTOM\" would match all services with a value for the custom field. Valid options are \"CUSTOM\", \"APP_ENGINE\", \"CLOUD_ENDPOINTS\", \"MESH_ISTIO\", and \"CLUSTER_ISTIO\" (deprecated),", + // "description": "A filter specifying what Services to return. 
The filter supports filtering on a particular service-identifier type or one of its attributes.To filter on a particular service-identifier type, the identifier_case refers to which option in the identifier field is populated. For example, the filter identifier_case = \"CUSTOM\" would match all services with a value for the custom field. Valid options include \"CUSTOM\", \"APP_ENGINE\", \"MESH_ISTIO\", and the other options listed at https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#ServiceTo filter on an attribute of a service-identifier type, apply the filter name by using the snake case of the service-identifier type and the attribute of that service-identifier type, and join the two with a period. For example, to filter by the meshUid field of the MeshIstio service-identifier type, you must filter on mesh_istio.mesh_uid = \"123\" to match all services with mesh UID \"123\". Service-identifier types and their attributes are described at https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service", // "location": "query", // "type": "string" // }, @@ -12377,7 +17161,7 @@ func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesRespon // "type": "string" // }, // "parent": { - // "description": "Required. Resource name of the parent containing the listed services, either a project or a Monitoring Workspace. The formats are:\nprojects/[PROJECT_ID_OR_NUMBER]\nworkspaces/[HOST_PROJECT_ID_OR_NUMBER]\n", + // "description": "Required. Resource name of the parent containing the listed services, either a project (https://cloud.google.com/monitoring/api/v3#project_name) or a Monitoring Metrics Scope. The formats are: projects/[PROJECT_ID_OR_NUMBER] workspaces/[HOST_PROJECT_ID_OR_NUMBER] ", // "location": "path", // "pattern": "^[^/]+/[^/]+$", // "required": true, @@ -12430,6 +17214,9 @@ type ServicesPatchCall struct { } // Patch: Update this Service. +// +// - name: Resource name for this Service. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]. func (r *ServicesService) Patch(name string, service *MService) *ServicesPatchCall { c := &ServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -12471,7 +17258,7 @@ func (c *ServicesPatchCall) Header() http.Header { func (c *ServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -12511,17 +17298,17 @@ func (c *ServicesPatchCall) Do(opts ...googleapi.CallOption) (*MService, error) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &MService{ ServerResponse: googleapi.ServerResponse{ @@ -12544,7 +17331,7 @@ func (c *ServicesPatchCall) Do(opts ...googleapi.CallOption) (*MService, error) // ], // "parameters": { // "name": { - // "description": "Resource name for this Service. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]\n", + // "description": "Resource name for this Service. 
The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] ", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+$", // "required": true, @@ -12584,6 +17371,9 @@ type ServicesServiceLevelObjectivesCreateCall struct { } // Create: Create a ServiceLevelObjective for the given Service. +// +// - parent: Resource name of the parent Service. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]. func (r *ServicesServiceLevelObjectivesService) Create(parent string, servicelevelobjective *ServiceLevelObjective) *ServicesServiceLevelObjectivesCreateCall { c := &ServicesServiceLevelObjectivesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -12627,7 +17417,7 @@ func (c *ServicesServiceLevelObjectivesCreateCall) Header() http.Header { func (c *ServicesServiceLevelObjectivesCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -12667,17 +17457,17 @@ func (c *ServicesServiceLevelObjectivesCreateCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceLevelObjective{ ServerResponse: googleapi.ServerResponse{ @@ -12700,7 +17490,7 @@ func (c *ServicesServiceLevelObjectivesCreateCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "parent": { - // "description": "Required. Resource name of the parent Service. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]\n", + // "description": "Required. Resource name of the parent Service. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] ", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+$", // "required": true, @@ -12738,6 +17528,11 @@ type ServicesServiceLevelObjectivesDeleteCall struct { } // Delete: Delete the given ServiceLevelObjective. +// +// - name: Resource name of the ServiceLevelObjective to delete. The +// format is: +// projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelOb +// jectives/[SLO_NAME]. 
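+//
+// A minimal sketch; the SLO resource name is a placeholder:
+//
+//	_, err := svc.Services.ServiceLevelObjectives.
+//		Delete("projects/my-project/services/checkout/serviceLevelObjectives/availability-slo").
+//		Do()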
func (r *ServicesServiceLevelObjectivesService) Delete(name string) *ServicesServiceLevelObjectivesDeleteCall { c := &ServicesServiceLevelObjectivesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -12771,7 +17566,7 @@ func (c *ServicesServiceLevelObjectivesDeleteCall) Header() http.Header { func (c *ServicesServiceLevelObjectivesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -12806,17 +17601,17 @@ func (c *ServicesServiceLevelObjectivesDeleteCall) Do(opts ...googleapi.CallOpti if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ @@ -12839,7 +17634,7 @@ func (c *ServicesServiceLevelObjectivesDeleteCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "name": { - // "description": "Required. Resource name of the ServiceLevelObjective to delete. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]\n", + // "description": "Required. Resource name of the ServiceLevelObjective to delete. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] ", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+/serviceLevelObjectives/[^/]+$", // "required": true, @@ -12870,6 +17665,11 @@ type ServicesServiceLevelObjectivesGetCall struct { } // Get: Get a ServiceLevelObjective by name. +// +// - name: Resource name of the ServiceLevelObjective to get. The format +// is: +// projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelOb +// jectives/[SLO_NAME]. func (r *ServicesServiceLevelObjectivesService) Get(name string) *ServicesServiceLevelObjectivesGetCall { c := &ServicesServiceLevelObjectivesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -12883,9 +17683,19 @@ func (r *ServicesServiceLevelObjectivesService) Get(name string) *ServicesServic // BasicSli with a RequestBasedSli spelling out how the SLI is computed. // // Possible values: -// "VIEW_UNSPECIFIED" -// "FULL" -// "EXPLICIT" +// +// "VIEW_UNSPECIFIED" - Same as FULL. +// "FULL" - Return the embedded ServiceLevelIndicator in the form in +// +// which it was defined. If it was defined using a BasicSli, return that +// BasicSli. +// +// "EXPLICIT" - For ServiceLevelIndicators using BasicSli +// +// articulation, instead return the ServiceLevelIndicator with its mode +// of computation fully spelled out as a RequestBasedSli. For +// ServiceLevelIndicators using RequestBasedSli or WindowsBasedSli, +// return the ServiceLevelIndicator as it was provided. 
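+//
+// A short sketch of fetching the fully expanded SLI form; svc and the
+// resource name are assumed placeholders:
+//
+//	slo, err := svc.Services.ServiceLevelObjectives.
+//		Get("projects/my-project/services/checkout/serviceLevelObjectives/availability-slo").
+//		View("EXPLICIT").
+//		Do()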
func (c *ServicesServiceLevelObjectivesGetCall) View(view string) *ServicesServiceLevelObjectivesGetCall { c.urlParams_.Set("view", view) return c @@ -12928,7 +17738,7 @@ func (c *ServicesServiceLevelObjectivesGetCall) Header() http.Header { func (c *ServicesServiceLevelObjectivesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -12966,17 +17776,17 @@ func (c *ServicesServiceLevelObjectivesGetCall) Do(opts ...googleapi.CallOption) if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceLevelObjective{ ServerResponse: googleapi.ServerResponse{ @@ -12999,7 +17809,7 @@ func (c *ServicesServiceLevelObjectivesGetCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "Required. Resource name of the ServiceLevelObjective to get. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]\n", + // "description": "Required. Resource name of the ServiceLevelObjective to get. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] ", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+/serviceLevelObjectives/[^/]+$", // "required": true, @@ -13012,6 +17822,11 @@ func (c *ServicesServiceLevelObjectivesGetCall) Do(opts ...googleapi.CallOption) // "FULL", // "EXPLICIT" // ], + // "enumDescriptions": [ + // "Same as FULL.", + // "Return the embedded ServiceLevelIndicator in the form in which it was defined. If it was defined using a BasicSli, return that BasicSli.", + // "For ServiceLevelIndicators using BasicSli articulation, instead return the ServiceLevelIndicator with its mode of computation fully spelled out as a RequestBasedSli. For ServiceLevelIndicators using RequestBasedSli or WindowsBasedSli, return the ServiceLevelIndicator as it was provided." + // ], // "location": "query", // "type": "string" // } @@ -13041,6 +17856,11 @@ type ServicesServiceLevelObjectivesListCall struct { } // List: List the ServiceLevelObjectives for the given Service. +// +// - parent: Resource name of the parent containing the listed SLOs, +// either a project or a Monitoring Metrics Scope. The formats are: +// projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] +// workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/-. func (r *ServicesServiceLevelObjectivesService) List(parent string) *ServicesServiceLevelObjectivesListCall { c := &ServicesServiceLevelObjectivesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -13078,9 +17898,19 @@ func (c *ServicesServiceLevelObjectivesListCall) PageToken(pageToken string) *Se // BasicSli with a RequestBasedSli spelling out how the SLI is computed. // // Possible values: -// "VIEW_UNSPECIFIED" -// "FULL" -// "EXPLICIT" +// +// "VIEW_UNSPECIFIED" - Same as FULL. +// "FULL" - Return the embedded ServiceLevelIndicator in the form in +// +// which it was defined. If it was defined using a BasicSli, return that +// BasicSli. 
+// +// "EXPLICIT" - For ServiceLevelIndicators using BasicSli +// +// articulation, instead return the ServiceLevelIndicator with its mode +// of computation fully spelled out as a RequestBasedSli. For +// ServiceLevelIndicators using RequestBasedSli or WindowsBasedSli, +// return the ServiceLevelIndicator as it was provided. func (c *ServicesServiceLevelObjectivesListCall) View(view string) *ServicesServiceLevelObjectivesListCall { c.urlParams_.Set("view", view) return c @@ -13123,7 +17953,7 @@ func (c *ServicesServiceLevelObjectivesListCall) Header() http.Header { func (c *ServicesServiceLevelObjectivesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -13162,17 +17992,17 @@ func (c *ServicesServiceLevelObjectivesListCall) Do(opts ...googleapi.CallOption if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListServiceLevelObjectivesResponse{ ServerResponse: googleapi.ServerResponse{ @@ -13211,7 +18041,7 @@ func (c *ServicesServiceLevelObjectivesListCall) Do(opts ...googleapi.CallOption // "type": "string" // }, // "parent": { - // "description": "Required. Resource name of the parent containing the listed SLOs, either a project or a Monitoring Workspace. The formats are:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]\nworkspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/-\n", + // "description": "Required. Resource name of the parent containing the listed SLOs, either a project or a Monitoring Metrics Scope. The formats are: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID] workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/- ", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+$", // "required": true, @@ -13224,6 +18054,11 @@ func (c *ServicesServiceLevelObjectivesListCall) Do(opts ...googleapi.CallOption // "FULL", // "EXPLICIT" // ], + // "enumDescriptions": [ + // "Same as FULL.", + // "Return the embedded ServiceLevelIndicator in the form in which it was defined. If it was defined using a BasicSli, return that BasicSli.", + // "For ServiceLevelIndicators using BasicSli articulation, instead return the ServiceLevelIndicator with its mode of computation fully spelled out as a RequestBasedSli. For ServiceLevelIndicators using RequestBasedSli or WindowsBasedSli, return the ServiceLevelIndicator as it was provided." + // ], // "location": "query", // "type": "string" // } @@ -13274,6 +18109,10 @@ type ServicesServiceLevelObjectivesPatchCall struct { } // Patch: Update the given ServiceLevelObjective. +// +// - name: Resource name for this ServiceLevelObjective. The format is: +// projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelOb +// jectives/[SLO_NAME]. 
func (r *ServicesServiceLevelObjectivesService) Patch(name string, servicelevelobjective *ServiceLevelObjective) *ServicesServiceLevelObjectivesPatchCall { c := &ServicesServiceLevelObjectivesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -13315,7 +18154,7 @@ func (c *ServicesServiceLevelObjectivesPatchCall) Header() http.Header { func (c *ServicesServiceLevelObjectivesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -13355,17 +18194,17 @@ func (c *ServicesServiceLevelObjectivesPatchCall) Do(opts ...googleapi.CallOptio if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ServiceLevelObjective{ ServerResponse: googleapi.ServerResponse{ @@ -13388,7 +18227,7 @@ func (c *ServicesServiceLevelObjectivesPatchCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "name": { - // "description": "Resource name for this ServiceLevelObjective. The format is:\nprojects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]\n", + // "description": "Resource name for this ServiceLevelObjective. The format is: projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME] ", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+/serviceLevelObjectives/[^/]+$", // "required": true, @@ -13489,7 +18328,7 @@ func (c *UptimeCheckIpsListCall) Header() http.Header { func (c *UptimeCheckIpsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200728") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } @@ -13524,17 +18363,17 @@ func (c *UptimeCheckIpsListCall) Do(opts ...googleapi.CallOption) (*ListUptimeCh if res.Body != nil { res.Body.Close() } - return nil, &googleapi.Error{ + return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, - } + }) } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return nil, gensupport.WrapError(err) } ret := &ListUptimeCheckIpsResponse{ ServerResponse: googleapi.ServerResponse{ diff --git a/prometheus-to-sd/vendor/google.golang.org/api/option/credentials_go19.go b/prometheus-to-sd/vendor/google.golang.org/api/option/credentials_go19.go deleted file mode 100644 index d06f918b0..000000000 --- a/prometheus-to-sd/vendor/google.golang.org/api/option/credentials_go19.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2018 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.9 - -package option - -import ( - "golang.org/x/oauth2/google" - "google.golang.org/api/internal" -) - -type withCreds google.Credentials - -func (w *withCreds) Apply(o *internal.DialSettings) { - o.Credentials = (*google.Credentials)(w) -} - -// WithCredentials returns a ClientOption that authenticates API calls. -func WithCredentials(creds *google.Credentials) ClientOption { - return (*withCreds)(creds) -} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/option/credentials_notgo19.go b/prometheus-to-sd/vendor/google.golang.org/api/option/credentials_notgo19.go deleted file mode 100644 index 0ce107a62..000000000 --- a/prometheus-to-sd/vendor/google.golang.org/api/option/credentials_notgo19.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package option - -import ( - "golang.org/x/oauth2/google" - "google.golang.org/api/internal" -) - -type withCreds google.DefaultCredentials - -func (w *withCreds) Apply(o *internal.DialSettings) { - o.Credentials = (*google.DefaultCredentials)(w) -} - -func WithCredentials(creds *google.DefaultCredentials) ClientOption { - return (*withCreds)(creds) -} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/option/internaloption/internaloption.go b/prometheus-to-sd/vendor/google.golang.org/api/option/internaloption/internaloption.go index d5debb763..cc7ebfe27 100644 --- a/prometheus-to-sd/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/prometheus-to-sd/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -6,6 +6,7 @@ package internaloption import ( + "golang.org/x/oauth2/google" "google.golang.org/api/internal" "google.golang.org/api/option" ) @@ -20,7 +21,7 @@ func (o defaultEndpointOption) Apply(settings *internal.DialSettings) { // // It should only be used internally by generated clients. // -// This is similar to WithEndpoint, but allows us to determine whether the user has overriden the default endpoint. +// This is similar to WithEndpoint, but allows us to determine whether the user has overridden the default endpoint. func WithDefaultEndpoint(url string) option.ClientOption { return defaultEndpointOption(url) } @@ -50,3 +51,93 @@ type skipDialSettingsValidation struct{} func (s skipDialSettingsValidation) Apply(settings *internal.DialSettings) { settings.SkipValidation = true } + +// EnableDirectPath returns a ClientOption that overrides the default +// attempt to use DirectPath. +// +// It should only be used internally by generated clients. +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func EnableDirectPath(dp bool) option.ClientOption { + return enableDirectPath(dp) +} + +type enableDirectPath bool + +func (e enableDirectPath) Apply(o *internal.DialSettings) { + o.EnableDirectPath = bool(e) +} + +// AllowNonDefaultServiceAccount returns a ClientOption that overrides the default +// requirement for using the default service account for DirectPath. +// +// It should only be used internally by generated clients. +// This is an EXPERIMENTAL API and may be changed or removed in the future. 
+func AllowNonDefaultServiceAccount(nd bool) option.ClientOption { + return allowNonDefaultServiceAccount(nd) +} + +type allowNonDefaultServiceAccount bool + +func (a allowNonDefaultServiceAccount) Apply(o *internal.DialSettings) { + o.AllowNonDefaultServiceAccount = bool(a) +} + +// WithDefaultAudience returns a ClientOption that specifies a default audience +// to be used as the audience field ("aud") for the JWT token authentication. +// +// It should only be used internally by generated clients. +func WithDefaultAudience(audience string) option.ClientOption { + return withDefaultAudience(audience) +} + +type withDefaultAudience string + +func (w withDefaultAudience) Apply(o *internal.DialSettings) { + o.DefaultAudience = string(w) +} + +// WithDefaultScopes returns a ClientOption that overrides the default OAuth2 +// scopes to be used for a service. +// +// It should only be used internally by generated clients. +func WithDefaultScopes(scope ...string) option.ClientOption { + return withDefaultScopes(scope) +} + +type withDefaultScopes []string + +func (w withDefaultScopes) Apply(o *internal.DialSettings) { + o.DefaultScopes = make([]string, len(w)) + copy(o.DefaultScopes, w) +} + +// EnableJwtWithScope returns a ClientOption that specifies if scope can be used +// with self-signed JWT. +func EnableJwtWithScope() option.ClientOption { + return enableJwtWithScope(true) +} + +type enableJwtWithScope bool + +func (w enableJwtWithScope) Apply(o *internal.DialSettings) { + o.EnableJwtWithScope = bool(w) +} + +// WithCredentials returns a client option to specify credentials which will be used to authenticate API calls. +// This credential takes precedence over all other credential options. +func WithCredentials(creds *google.Credentials) option.ClientOption { + return (*withCreds)(creds) +} + +type withCreds google.Credentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.InternalCredentials = (*google.Credentials)(w) +} + +// EmbeddableAdapter is a no-op option.ClientOption that allow libraries to +// create their own client options by embedding this type into their own +// client-specific option wrapper. See example for usage. +type EmbeddableAdapter struct{} + +func (*EmbeddableAdapter) Apply(_ *internal.DialSettings) {} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/option/option.go b/prometheus-to-sd/vendor/google.golang.org/api/option/option.go index b7c40d60a..b2085a194 100644 --- a/prometheus-to-sd/vendor/google.golang.org/api/option/option.go +++ b/prometheus-to-sd/vendor/google.golang.org/api/option/option.go @@ -10,7 +10,9 @@ import ( "net/http" "golang.org/x/oauth2" + "golang.org/x/oauth2/google" "google.golang.org/api/internal" + "google.golang.org/api/internal/impersonate" "google.golang.org/grpc" ) @@ -80,6 +82,9 @@ func (w withEndpoint) Apply(o *internal.DialSettings) { // WithScopes returns a ClientOption that overrides the default OAuth2 scopes // to be used for a service. +// +// If both WithScopes and WithTokenSource are used, scope settings from the +// token source will be used instead. func WithScopes(scope ...string) ClientOption { return withScopes(scope) } @@ -91,7 +96,9 @@ func (w withScopes) Apply(o *internal.DialSettings) { copy(o.Scopes, w) } -// WithUserAgent returns a ClientOption that sets the User-Agent. +// WithUserAgent returns a ClientOption that sets the User-Agent. This option +// is incompatible with the [WithHTTPClient] option. 
If you wish to provide a
+// custom client you will need to add this header via RoundTripper middleware.
 func WithUserAgent(ua string) ClientOption {
 	return withUA(ua)
 }
@@ -143,8 +150,6 @@ func (w withGRPCDialOption) Apply(o *internal.DialSettings) {
 
 // WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC
 // connections that requests will be balanced between.
-//
-// This is an EXPERIMENTAL API and may be changed or removed in the future.
 func WithGRPCConnectionPool(size int) ClientOption {
 	return withGRPCConnectionPool(size)
 }
@@ -269,3 +274,72 @@ type withClientCertSource struct{ s ClientCertSource }
 func (w withClientCertSource) Apply(o *internal.DialSettings) {
 	o.ClientCertSource = w.s
 }
+
+// ImpersonateCredentials returns a ClientOption that will impersonate the
+// target service account.
+//
+// In order to impersonate the target service account, the base service
+// account must have the Service Account Token Creator role,
+// roles/iam.serviceAccountTokenCreator, on the target service account.
+// See https://cloud.google.com/iam/docs/understanding-service-accounts.
+//
+// Optionally, delegates can be used during impersonation if the base service
+// account lacks the token creator role on the target. When using delegates,
+// each service account must be granted roles/iam.serviceAccountTokenCreator
+// on the next service account in the chain.
+//
+// For example, if a base service account of SA1 is trying to impersonate target
+// service account SA2 while using delegate service accounts DSA1 and DSA2,
+// the following must be true:
+//
+//  1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on
+//     DSA1.
+//  2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2.
+//  3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2.
+//
+// The resulting impersonated credential will either have the default scopes of
+// the client being instantiated or the scopes from WithScopes if provided.
+// Scopes are required for creating impersonated credentials, so if this option
+// is used while not using a NewClient/NewService function, WithScopes must also
+// be explicitly passed in.
+//
+// If the base credential is an authorized user and not a service account, or if
+// the option WithQuotaProject is set, the target service account must have a
+// role that grants the serviceusage.services.use permission, such as
+// roles/serviceusage.serviceUsageConsumer.
+//
+// This is an EXPERIMENTAL API and may be changed or removed in the future.
+//
+// Deprecated: This option has been replaced by the `impersonate` package:
+// `google.golang.org/api/impersonate`. Please use the `impersonate` package
+// instead, with the WithTokenSource option.
+func ImpersonateCredentials(target string, delegates ...string) ClientOption {
+	return impersonateServiceAccount{
+		target:    target,
+		delegates: delegates,
+	}
+}
+
+type impersonateServiceAccount struct {
+	target    string
+	delegates []string
+}
+
+func (i impersonateServiceAccount) Apply(o *internal.DialSettings) {
+	o.ImpersonationConfig = &impersonate.Config{
+		Target: i.target,
+	}
+	o.ImpersonationConfig.Delegates = make([]string, len(i.delegates))
+	copy(o.ImpersonationConfig.Delegates, i.delegates)
+}
+
+type withCreds google.Credentials
+
+func (w *withCreds) Apply(o *internal.DialSettings) {
+	o.Credentials = (*google.Credentials)(w)
+}
+
+// WithCredentials returns a ClientOption that authenticates API calls.
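The delegation chain described in the ImpersonateCredentials comment above is easier to see with concrete values. A minimal sketch, assuming hypothetical service-account emails and that the base credentials already hold roles/iam.serviceAccountTokenCreator on the first delegate; per the deprecation note, new code should prefer google.golang.org/api/impersonate instead:

	import (
		"context"

		monitoring "google.golang.org/api/monitoring/v3"
		"google.golang.org/api/option"
	)

	func newImpersonatedService(ctx context.Context) (*monitoring.Service, error) {
		// Token minting flows base -> dsa1 -> dsa2 -> target, so each account
		// needs the Token Creator role on the next one in the chain.
		return monitoring.NewService(ctx,
			option.ImpersonateCredentials(
				"target-sa@my-project.iam.gserviceaccount.com", // target (hypothetical)
				"dsa1@my-project.iam.gserviceaccount.com",      // delegate 1 (hypothetical)
				"dsa2@my-project.iam.gserviceaccount.com",      // delegate 2 (hypothetical)
			),
			// Scopes are required when impersonating; NewService supplies
			// defaults, but standalone use must pass WithScopes explicitly.
			option.WithScopes("https://www.googleapis.com/auth/monitoring.read"),
		)
	}

The WithCredentials constructor documented by the comment above continues below.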
+func WithCredentials(creds *google.Credentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/transport/cert/default_cert.go b/prometheus-to-sd/vendor/google.golang.org/api/transport/cert/default_cert.go deleted file mode 100644 index c03af65fd..000000000 --- a/prometheus-to-sd/vendor/google.golang.org/api/transport/cert/default_cert.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cert contains certificate tools for Google API clients. -// This package is intended to be used with crypto/tls.Config.GetClientCertificate. -// -// The certificates can be used to satisfy Google's Endpoint Validation. -// See https://cloud.google.com/endpoint-verification/docs/overview -// -// This package is not intended for use by end developers. Use the -// google.golang.org/api/option package to configure API clients. -package cert - -import ( - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - "os/exec" - "os/user" - "path/filepath" - "sync" -) - -const ( - metadataPath = ".secureConnect" - metadataFile = "context_aware_metadata.json" -) - -var ( - defaultSourceOnce sync.Once - defaultSource Source - defaultSourceErr error -) - -// Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. -type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) - -// DefaultSource returns a certificate source that execs the command specified -// in the file at ~/.secureConnect/context_aware_metadata.json -// -// If that file does not exist, a nil source is returned. -func DefaultSource() (Source, error) { - defaultSourceOnce.Do(func() { - defaultSource, defaultSourceErr = newSecureConnectSource() - }) - return defaultSource, defaultSourceErr -} - -type secureConnectSource struct { - metadata secureConnectMetadata -} - -type secureConnectMetadata struct { - Cmd []string `json:"cert_provider_command"` -} - -// newSecureConnectSource creates a secureConnectSource by reading the well-known file. -func newSecureConnectSource() (Source, error) { - user, err := user.Current() - if err != nil { - // Ignore. - return nil, nil - } - filename := filepath.Join(user.HomeDir, metadataPath, metadataFile) - file, err := ioutil.ReadFile(filename) - if os.IsNotExist(err) { - // Ignore. - return nil, nil - } - if err != nil { - return nil, err - } - - var metadata secureConnectMetadata - if err := json.Unmarshal(file, &metadata); err != nil { - return nil, fmt.Errorf("cert: could not parse JSON in %q: %v", filename, err) - } - if err := validateMetadata(metadata); err != nil { - return nil, fmt.Errorf("cert: invalid config in %q: %v", filename, err) - } - return (&secureConnectSource{ - metadata: metadata, - }).getClientCertificate, nil -} - -func validateMetadata(metadata secureConnectMetadata) error { - if len(metadata.Cmd) == 0 { - return errors.New("empty cert_provider_command") - } - return nil -} - -func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { - // TODO(cbro): consider caching valid certificates rather than exec'ing every time. - command := s.metadata.Cmd - data, err := exec.Command(command[0], command[1:]...).Output() - if err != nil { - // TODO(cbro): read stderr for error message? Might contain sensitive info. 
- return nil, err - } - cert, err := tls.X509KeyPair(data, data) - if err != nil { - return nil, err - } - return &cert, nil -} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/transport/http/default_transport_go113.go b/prometheus-to-sd/vendor/google.golang.org/api/transport/http/default_transport_go113.go deleted file mode 100644 index 924f2704d..000000000 --- a/prometheus-to-sd/vendor/google.golang.org/api/transport/http/default_transport_go113.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.13 - -package http - -import "net/http" - -// clonedTransport returns the given RoundTripper as a cloned *http.Transport. -// It returns nil if the RoundTripper can't be cloned or coerced to -// *http.Transport. -func clonedTransport(rt http.RoundTripper) *http.Transport { - t, ok := rt.(*http.Transport) - if !ok { - return nil - } - return t.Clone() -} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go b/prometheus-to-sd/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go deleted file mode 100644 index 3cb16c6cb..000000000 --- a/prometheus-to-sd/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.13 - -package http - -import "net/http" - -// clonedTransport returns the given RoundTripper as a cloned *http.Transport. -// For versions of Go <1.13, this is not supported, so return nil. -func clonedTransport(rt http.RoundTripper) *http.Transport { - return nil -} diff --git a/prometheus-to-sd/vendor/google.golang.org/api/transport/http/dial.go b/prometheus-to-sd/vendor/google.golang.org/api/transport/http/dial.go index 445030141..403509d08 100644 --- a/prometheus-to-sd/vendor/google.golang.org/api/transport/http/dial.go +++ b/prometheus-to-sd/vendor/google.golang.org/api/transport/http/dial.go @@ -13,26 +13,18 @@ import ( "errors" "net" "net/http" - "net/url" - "os" - "strings" "time" "go.opencensus.io/plugin/ochttp" + "golang.org/x/net/http2" "golang.org/x/oauth2" "google.golang.org/api/googleapi/transport" "google.golang.org/api/internal" + "google.golang.org/api/internal/cert" "google.golang.org/api/option" - "google.golang.org/api/transport/cert" "google.golang.org/api/transport/http/internal/propagation" ) -const ( - mTLSModeAlways = "always" - mTLSModeNever = "never" - mTLSModeAuto = "auto" -) - // NewClient returns an HTTP client for use communicating with a Google cloud // service, configured with the given ClientOptions. It also returns the endpoint // for the service as specified in the options. 
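Before the next hunk, it helps to see NewClient from the caller's side. A minimal sketch using only this package's public API (the scope is illustrative); the hunk that follows reworks how the client certificate source and endpoint are resolved internally, and later in this file adds an HTTP/2 ReadIdleTimeout so broken idle connections are pruned rather than reused:

	import (
		"context"
		"fmt"
		"log"

		"google.golang.org/api/option"
		htransport "google.golang.org/api/transport/http"
	)

	func demo(ctx context.Context) {
		// NewClient resolves credentials, mTLS settings, and the endpoint
		// (as configured by the options) in one call; generated clients use
		// both return values.
		client, endpoint, err := htransport.NewClient(ctx,
			option.WithScopes("https://www.googleapis.com/auth/monitoring.read"))
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("resolved endpoint:", endpoint)
		_ = client
	}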
@@ -41,11 +33,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, if err != nil { return nil, "", err } - clientCertSource, err := getClientCertificateSource(settings) - if err != nil { - return nil, "", err - } - endpoint, err := getEndpoint(settings, clientCertSource) + clientCertSource, endpoint, err := internal.GetClientCertificateSourceAndEndpoint(settings) if err != nil { return nil, "", err } @@ -77,7 +65,6 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna paramTransport := ¶meterTransport{ base: base, userAgent: settings.UserAgent, - quotaProject: settings.QuotaProject, requestReason: settings.RequestReason, } var trans http.RoundTripper = paramTransport @@ -86,6 +73,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna case settings.NoAuth: // Do nothing. case settings.APIKey != "": + paramTransport.quotaProject = internal.GetQuotaProject(nil, settings.QuotaProject) trans = &transport.APIKey{ Transport: trans, Key: settings.APIKey, @@ -95,12 +83,9 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna if err != nil { return nil, err } - if paramTransport.quotaProject == "" { - paramTransport.quotaProject = internal.QuotaProjectFromCreds(creds) - } - + paramTransport.quotaProject = internal.GetQuotaProject(creds, settings.QuotaProject) ts := creds.TokenSource - if settings.TokenSource != nil { + if settings.ImpersonationConfig == nil && settings.TokenSource != nil { ts = settings.TokenSource } trans = &oauth2.Transport{ @@ -187,9 +172,22 @@ func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) htt } } + configureHTTP2(trans) + return trans } +// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the +// transport. This allows broken idle connections to be pruned more quickly, +// preventing the client from attempting to re-use connections that will no +// longer work. +func configureHTTP2(trans *http.Transport) { + http2Trans, err := http2.ConfigureTransports(trans) + if err == nil { + http2Trans.ReadIdleTimeout = time.Second * 31 + } +} + // fallbackBaseTransport is used in " or "project:". + Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` + // Describes what error is encountered when accessing this resource. + // For example, updating a cloud project may require the `writer` permission + // on the developer console project. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *ResourceInfo) Reset() { + *x = ResourceInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceInfo) ProtoMessage() {} + +func (x *ResourceInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceInfo.ProtoReflect.Descriptor instead. 
+func (*ResourceInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{7} +} + +func (x *ResourceInfo) GetResourceType() string { + if x != nil { + return x.ResourceType + } + return "" +} + +func (x *ResourceInfo) GetResourceName() string { + if x != nil { + return x.ResourceName + } + return "" +} + +func (x *ResourceInfo) GetOwner() string { + if x != nil { + return x.Owner + } + return "" +} + +func (x *ResourceInfo) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// Provides links to documentation or for performing an out of band action. +// +// For example, if a quota check failed with an error indicating the calling +// project hasn't enabled the accessed service, this can contain a URL pointing +// directly to the right place in the developer console to flip the bit. +type Help struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // URL(s) pointing to additional information on handling the current error. + Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"` +} + +func (x *Help) Reset() { + *x = Help{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Help) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Help) ProtoMessage() {} + +func (x *Help) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Help.ProtoReflect.Descriptor instead. +func (*Help) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8} +} + +func (x *Help) GetLinks() []*Help_Link { + if x != nil { + return x.Links + } + return nil +} + +// Provides a localized error message that is safe to return to the user +// which can be attached to an RPC error. +type LocalizedMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The locale used following the specification defined at + // https://www.rfc-editor.org/rfc/bcp/bcp47.txt. + // Examples are: "en-US", "fr-CH", "es-MX" + Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"` + // The localized error message in the above locale. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *LocalizedMessage) Reset() { + *x = LocalizedMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalizedMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalizedMessage) ProtoMessage() {} + +func (x *LocalizedMessage) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalizedMessage.ProtoReflect.Descriptor instead. 
+func (*LocalizedMessage) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{9} +} + +func (x *LocalizedMessage) GetLocale() string { + if x != nil { + return x.Locale + } + return "" +} + +func (x *LocalizedMessage) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +// A message type used to describe a single quota violation. For example, a +// daily quota or a custom quota that was exceeded. +type QuotaFailure_Violation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The subject on which the quota check failed. + // For example, "clientip:" or "project:". + Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the quota check failed. Clients can use this + // description to find more about the quota configuration in the service's + // public documentation, or find the relevant quota limit to adjust through + // developer console. + // + // For example: "Service disabled" or "Daily Limit for read operations + // exceeded". + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *QuotaFailure_Violation) Reset() { + *x = QuotaFailure_Violation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuotaFailure_Violation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuotaFailure_Violation) ProtoMessage() {} + +func (x *QuotaFailure_Violation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuotaFailure_Violation.ProtoReflect.Descriptor instead. +func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *QuotaFailure_Violation) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *QuotaFailure_Violation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// A message type used to describe a single precondition failure. +type PreconditionFailure_Violation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of PreconditionFailure. We recommend using a service-specific + // enum type to define the supported precondition violation subjects. For + // example, "TOS" for "Terms of Service violation". + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The subject, relative to the type, that failed. + // For example, "google.com/cloud" relative to the "TOS" type would indicate + // which terms of service is being referenced. + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the precondition failed. Developers can use this + // description to understand how to fix the failure. + // + // For example: "Terms of service not accepted". 
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *PreconditionFailure_Violation) Reset() { + *x = PreconditionFailure_Violation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PreconditionFailure_Violation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PreconditionFailure_Violation) ProtoMessage() {} + +func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PreconditionFailure_Violation.ProtoReflect.Descriptor instead. +func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *PreconditionFailure_Violation) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *PreconditionFailure_Violation) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *PreconditionFailure_Violation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// A message type used to describe a single bad request field. +type BadRequest_FieldViolation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A path that leads to a field in the request body. The value will be a + // sequence of dot-separated identifiers that identify a protocol buffer + // field. + // + // Consider the following: + // + // message CreateContactRequest { + // message EmailAddress { + // enum Type { + // TYPE_UNSPECIFIED = 0; + // HOME = 1; + // WORK = 2; + // } + // + // optional string email = 1; + // repeated EmailType type = 2; + // } + // + // string full_name = 1; + // repeated EmailAddress email_addresses = 2; + // } + // + // In this example, in proto `field` could take one of the following values: + // + // - `full_name` for a violation in the `full_name` value + // - `email_addresses[1].email` for a violation in the `email` field of the + // first `email_addresses` message + // - `email_addresses[3].type[2]` for a violation in the second `type` + // value in the third `email_addresses` message. + // + // In JSON, the same values are represented as: + // + // - `fullName` for a violation in the `fullName` value + // - `emailAddresses[1].email` for a violation in the `email` field of the + // first `emailAddresses` message + // - `emailAddresses[3].type[2]` for a violation in the second `type` + // value in the third `emailAddresses` message. + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + // A description of why the request element is bad. 
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *BadRequest_FieldViolation) Reset() { + *x = BadRequest_FieldViolation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BadRequest_FieldViolation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BadRequest_FieldViolation) ProtoMessage() {} + +func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BadRequest_FieldViolation.ProtoReflect.Descriptor instead. +func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *BadRequest_FieldViolation) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *BadRequest_FieldViolation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// Describes a URL link. +type Help_Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes what the link offers. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The URL of the link. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *Help_Link) Reset() { + *x = Help_Link{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Help_Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Help_Link) ProtoMessage() {} + +func (x *Help_Link) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Help_Link.ProtoReflect.Descriptor instead. 
+func (*Help_Link) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *Help_Link) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Help_Link) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +var File_google_rpc_error_details_proto protoreflect.FileDescriptor + +var file_google_rpc_error_details_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x01, 0x0a, + 0x09, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x3f, 0x0a, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, + 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, 0x09, 0x52, 0x65, 0x74, 0x72, + 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x22, 0x48, 0x0a, 0x09, 0x44, 0x65, 0x62, 0x75, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, + 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c, + 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a, + 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, + 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, + 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, + 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 
0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, + 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, + 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, + 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, + 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_error_details_proto_rawDescOnce sync.Once + file_google_rpc_error_details_proto_rawDescData = file_google_rpc_error_details_proto_rawDesc +) + +func file_google_rpc_error_details_proto_rawDescGZIP() []byte { + file_google_rpc_error_details_proto_rawDescOnce.Do(func() { + file_google_rpc_error_details_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_error_details_proto_rawDescData) + }) + return file_google_rpc_error_details_proto_rawDescData +} + +var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_google_rpc_error_details_proto_goTypes = []interface{}{ + (*ErrorInfo)(nil), // 0: google.rpc.ErrorInfo + (*RetryInfo)(nil), // 1: google.rpc.RetryInfo + (*DebugInfo)(nil), // 2: google.rpc.DebugInfo + (*QuotaFailure)(nil), // 3: google.rpc.QuotaFailure + (*PreconditionFailure)(nil), // 4: google.rpc.PreconditionFailure + (*BadRequest)(nil), // 5: google.rpc.BadRequest + (*RequestInfo)(nil), // 6: google.rpc.RequestInfo + (*ResourceInfo)(nil), // 7: google.rpc.ResourceInfo + (*Help)(nil), // 8: google.rpc.Help + (*LocalizedMessage)(nil), // 9: google.rpc.LocalizedMessage + nil, // 10: google.rpc.ErrorInfo.MetadataEntry + (*QuotaFailure_Violation)(nil), // 11: 
google.rpc.QuotaFailure.Violation + (*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation + (*BadRequest_FieldViolation)(nil), // 13: google.rpc.BadRequest.FieldViolation + (*Help_Link)(nil), // 14: google.rpc.Help.Link + (*durationpb.Duration)(nil), // 15: google.protobuf.Duration +} +var file_google_rpc_error_details_proto_depIdxs = []int32{ + 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation + 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation + 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation + 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_rpc_error_details_proto_init() } +func file_google_rpc_error_details_proto_init() { + if File_google_rpc_error_details_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DebugInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuotaFailure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PreconditionFailure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BadRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_rpc_error_details_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Help); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalizedMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuotaFailure_Violation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PreconditionFailure_Violation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BadRequest_FieldViolation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Help_Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_error_details_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_error_details_proto_goTypes, + DependencyIndexes: file_google_rpc_error_details_proto_depIdxs, + MessageInfos: file_google_rpc_error_details_proto_msgTypes, + }.Build() + File_google_rpc_error_details_proto = out.File + file_google_rpc_error_details_proto_rawDesc = nil + file_google_rpc_error_details_proto_goTypes = nil + file_google_rpc_error_details_proto_depIdxs = nil +} diff --git a/prometheus-to-sd/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/prometheus-to-sd/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index e79a53884..a6b508188 100644 --- a/prometheus-to-sd/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/prometheus-to-sd/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.21.9 // source: google/rpc/status.proto package status @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The `Status` type defines a logical error model that is suitable for // different programming environments, including REST APIs and RPC APIs. It is // used by [gRPC](https://github.com/grpc). Each `Status` message contains @@ -53,11 +48,13 @@ type Status struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. 
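The regenerated google.rpc messages above (QuotaFailure, BadRequest, and the rest) are what grpc-go's status package attaches to RPC errors. A minimal sketch of producing such an error on a server, assuming the usual published import path google.golang.org/genproto/googleapis/rpc/errdetails for these types (the package built from the regenerated files in this diff); quotaError is a hypothetical helper, not part of this patch:

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// quotaError builds a RESOURCE_EXHAUSTED status carrying a structured
// QuotaFailure detail, one of the regenerated google.rpc messages above.
func quotaError() error {
	st := status.New(codes.ResourceExhausted, "per-user quota exceeded")
	detailed, err := st.WithDetails(&errdetails.QuotaFailure{
		Violations: []*errdetails.QuotaFailure_Violation{
			{Subject: "user:1234", Description: "daily request limit reached"},
		},
	})
	if err != nil {
		// WithDetails fails only if a detail cannot be marshaled; fall
		// back to the undecorated status in that case.
		return st.Err()
	}
	return detailed.Err()
}

func main() { fmt.Println(quotaError()) }

A client can recover the typed details from the returned error via status.Convert(err).Details().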
diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/.travis.yml b/prometheus-to-sd/vendor/google.golang.org/grpc/.travis.yml
deleted file mode 100644
index 0e24e59f0..000000000
--- a/prometheus-to-sd/vendor/google.golang.org/grpc/.travis.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-language: go
-
-matrix:
-  include:
-  - go: 1.14.x
-    env: VET=1 GO111MODULE=on
-  - go: 1.14.x
-    env: RACE=1 GO111MODULE=on
-  - go: 1.14.x
-    env: RUN386=1
-  - go: 1.14.x
-    env: GRPC_GO_RETRY=on
-  - go: 1.14.x
-    env: TESTEXTRAS=1
-  - go: 1.13.x
-    env: GO111MODULE=on
-  - go: 1.12.x
-    env: GO111MODULE=on
-  - go: 1.9.x
-    env: GAE=1
-
-go_import_path: google.golang.org/grpc
-
-before_install:
-  - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi
-  - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi
-  - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi
-  - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi
-
-install:
-  - try3() { eval "$*" || eval "$*" || eval "$*"; }
-  - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi'
-  - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi
-  - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi
-
-script:
-  - set -e
-  - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi
-  - if [[ -n "${VET}" ]]; then ./vet.sh; fi
-  - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi
-  - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi
-  - make test
diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/CONTRIBUTING.md b/prometheus-to-sd/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 4f1567e2f..608aa6e1a 100644
--- a/prometheus-to-sd/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/prometheus-to-sd/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -20,6 +20,15 @@ How to get your contributions merged smoothly and quickly.
   both author's & reviewer's time is wasted. Create more PRs to address different
   concerns and everyone will be happy.
 
+- If you are searching for features to work on, issues labeled [Status: Help
+  Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
+  are a great place to start. These issues are well-documented and usually can be
+  resolved with a single pull request.
+
+- If you are adding a new file, make sure it has the copyright message template
+  at the top as a comment. You can copy over the message from an existing file
+  and update the year.
+
 - The grpc package should only depend on standard Go packages and a small number
   of exceptions. If your contribution introduces new dependencies which are NOT
   in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
@@ -32,14 +41,18 @@ How to get your contributions merged smoothly and quickly.
 
 - Provide a good **PR description** as a record of **what** change is being made
   and **why** it was made. Link to a github issue if it exists.
 
-- Don't fix code style and formatting unless you are already changing that line
-  to address an issue. PRs with irrelevant changes won't be merged. If you do
-  want to fix formatting or style, do that in a separate PR.
+- If you want to fix formatting or style, consider whether your changes are an
+  obvious improvement or might be considered a personal preference. If a style
+  change is based on preference, it likely will not be accepted.
If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the + benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments - that you'll need to address before merging. We expect you to be reasonably - responsive to those comments, otherwise the PR will be closed after 2-3 weeks - of inactivity. + that you'll need to address before merging. We'll mark it as `Status: Requires + Reporter Clarification` if we expect you to respond to these comments in a + timely manner. If the PR remains inactive for 6 days, it will be marked as + `stale` and automatically close 7 days after that if we don't hear back from + you. - Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use @@ -53,10 +66,8 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode - - optional `make testappengine` to run tests with appengine + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/MAINTAINERS.md b/prometheus-to-sd/vendor/google.golang.org/grpc/MAINTAINERS.md index 093c82b3a..c6672c0a3 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -8,17 +8,18 @@ See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIB for general contribution guidelines. ## Maintainers (in alphabetical order) -- [canguler](https://github.com/canguler), Google LLC + - [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC - [menghanl](https://github.com/menghanl), Google LLC - [srini100](https://github.com/srini100), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez), Google LLC +- [canguler](https://github.com/canguler), Google LLC - [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC - [jtattermusch](https://github.com/jtattermusch), Google LLC - [lyuxuan](https://github.com/lyuxuan), Google LLC - [makmukhi](https://github.com/makmukhi), Google LLC diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/Makefile b/prometheus-to-sd/vendor/google.golang.org/grpc/Makefile index 410f7d56d..1f8960922 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/Makefile +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/Makefile @@ -1,13 +1,13 @@ all: vet test testrace -build: deps +build: go build google.golang.org/grpc/... clean: go clean -i google.golang.org/grpc/... deps: - go get -d -v google.golang.org/grpc/... + GO111MODULE=on go get -d -v google.golang.org/grpc/... proto: @ if ! which protoc > /dev/null; then \ @@ -16,29 +16,18 @@ proto: fi go generate google.golang.org/grpc/... 
-test: testdeps +test: go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... -testsubmodule: testdeps +testsubmodule: cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... -testappengine: testappenginedeps - goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... - -testappenginedeps: - goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/... - -testdeps: - go get -d -v -t google.golang.org/grpc/... - -testrace: testdeps +testrace: go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... -updatedeps: - go get -d -v -u -f google.golang.org/grpc/... - -updatetestdeps: - go get -d -v -t -u -f google.golang.org/grpc/... +testdeps: + GO111MODULE=on go get -d -v -t google.golang.org/grpc/... vet: vetdeps ./vet.sh @@ -50,14 +39,8 @@ vetdeps: all \ build \ clean \ - deps \ proto \ test \ - testappengine \ - testappenginedeps \ - testdeps \ testrace \ - updatedeps \ - updatetestdeps \ vet \ vetdeps diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/NOTICE.txt b/prometheus-to-sd/vendor/google.golang.org/grpc/NOTICE.txt new file mode 100644 index 000000000..530197749 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/NOTICE.txt @@ -0,0 +1,13 @@ +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/README.md b/prometheus-to-sd/vendor/google.golang.org/grpc/README.md index fef78e4a1..0e6ae69a5 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/README.md +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/README.md @@ -1,7 +1,7 @@ # gRPC-Go [![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) -[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)][API] +[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general @@ -131,11 +131,11 @@ the root cause of the connection being closed is on the server side. Turn on logging on __both client and server__, and see if there are any transport errors. 
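One concrete way to follow the logging advice above, sketched under the assumption that it runs before any other gRPC activity (the environment variables GRPC_GO_LOG_SEVERITY_LEVEL=info and GRPC_GO_LOG_VERBOSITY_LEVEL=2 have the same effect without a code change):

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Route gRPC's internal info/warning/error logs to stdout/stderr with
	// verbosity 2 so transport-level errors (such as connections being
	// closed by the peer) become visible on both client and server.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stderr, os.Stderr, 2))

	// ... build the client or server as usual; its internal events are
	// now logged.
}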
-[API]: https://grpc.io/docs/languages/go/api
+[API]: https://pkg.go.dev/google.golang.org/grpc
 [Go]: https://golang.org
 [Go module]: https://github.com/golang/go/wiki/Modules
 [gRPC]: https://grpc.io
 [Go gRPC docs]: https://grpc.io/docs/languages/go
-[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696
+[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608
 [quick start]: https://grpc.io/docs/languages/go/quickstart
 [go-releases]: https://golang.org/doc/devel/release.html
diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/SECURITY.md b/prometheus-to-sd/vendor/google.golang.org/grpc/SECURITY.md
new file mode 100644
index 000000000..be6e10870
--- /dev/null
+++ b/prometheus-to-sd/vendor/google.golang.org/grpc/SECURITY.md
@@ -0,0 +1,3 @@
+# Security Policy
+
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/attributes/attributes.go b/prometheus-to-sd/vendor/google.golang.org/grpc/attributes/attributes.go
index ee5c51e6c..3efca4591 100644
--- a/prometheus-to-sd/vendor/google.golang.org/grpc/attributes/attributes.go
+++ b/prometheus-to-sd/vendor/google.golang.org/grpc/attributes/attributes.go
@@ -19,58 +19,112 @@
 // Package attributes defines a generic key/value store used in various gRPC
 // components.
 //
-// All APIs in this package are EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
 package attributes
 
-import "fmt"
+import (
+	"fmt"
+	"strings"
+)
 
 // Attributes is an immutable struct for storing and retrieving generic
 // key/value pairs. Keys must be hashable, and users should define their own
-// types for keys.
+// types for keys. Values should not be modified after they are added to an
+// Attributes or if they were received from one. If values implement 'Equal(o
+// interface{}) bool', it will be called by (*Attributes).Equal to determine
+// whether two values with the same key should be considered equal.
 type Attributes struct {
 	m map[interface{}]interface{}
 }
 
-// New returns a new Attributes containing all key/value pairs in kvs. If the
-// same key appears multiple times, the last value overwrites all previous
-// values for that key. Panics if len(kvs) is not even.
-func New(kvs ...interface{}) *Attributes {
-	if len(kvs)%2 != 0 {
-		panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
-	}
-	a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)}
-	for i := 0; i < len(kvs)/2; i++ {
-		a.m[kvs[i*2]] = kvs[i*2+1]
-	}
-	return a
+// New returns a new Attributes containing the key/value pair.
+func New(key, value interface{}) *Attributes {
+	return &Attributes{m: map[interface{}]interface{}{key: value}}
 }
 
-// WithValues returns a new Attributes containing all key/value pairs in a and
-// kvs. Panics if len(kvs) is not even. If the same key appears multiple
-// times, the last value overwrites all previous values for that key. To
-// remove an existing key, use a nil value.
-func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
+// WithValue returns a new Attributes containing the previous keys and values
+// and the new key/value pair.
If the same key appears multiple times, the +// last value overwrites all previous values for that key. To remove an +// existing key, use a nil value. value should not be modified later. +func (a *Attributes) WithValue(key, value interface{}) *Attributes { if a == nil { - return New(kvs...) - } - if len(kvs)%2 != 0 { - panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + return New(key, value) } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } - for i := 0; i < len(kvs)/2; i++ { - n.m[kvs[i*2]] = kvs[i*2+1] - } + n.m[key] = value return n } // Value returns the value associated with these attributes for key, or nil if -// no value is associated with key. +// no value is associated with key. The returned value should not be modified. func (a *Attributes) Value(key interface{}) interface{} { if a == nil { return nil } return a.m[key] } + +// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) +// bool' is implemented for a value in the attributes, it is called to +// determine if the value matches the one stored in the other attributes. If +// Equal is not implemented, standard equality is used to determine if the two +// values are equal. Note that some types (e.g. maps) aren't comparable by +// default, so they must be wrapped in a struct, or in an alias type, with Equal +// defined. +func (a *Attributes) Equal(o *Attributes) bool { + if a == nil && o == nil { + return true + } + if a == nil || o == nil { + return false + } + if len(a.m) != len(o.m) { + return false + } + for k, v := range a.m { + ov, ok := o.m[k] + if !ok { + // o missing element of a + return false + } + if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if !eq.Equal(ov) { + return false + } + } else if v != ov { + // Fallback to a standard equality check if Value is unimplemented. + return false + } + } + return true +} + +// String prints the attribute map. If any key or values throughout the map +// implement fmt.Stringer, it calls that method and appends. +func (a *Attributes) String() string { + var sb strings.Builder + sb.WriteString("{") + first := true + for k, v := range a.m { + var key, val string + if str, ok := k.(interface{ String() string }); ok { + key = str.String() + } + if str, ok := v.(interface{ String() string }); ok { + val = str.String() + } + if !first { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("%q: %q, ", key, val)) + first = false + } + sb.WriteString("}") + return sb.String() +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/backoff.go b/prometheus-to-sd/vendor/google.golang.org/grpc/backoff.go index ff7c3ee6f..29475e31c 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/backoff.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/backoff.go @@ -48,7 +48,10 @@ type BackoffConfig struct { // here for more details: // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type ConnectParams struct { // Backoff specifies the configuration options for connection backoff. 
Backoff backoff.Config diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/balancer.go b/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/balancer.go index 8bf359dbf..8f00523c0 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/balancer.go @@ -27,6 +27,7 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" @@ -75,24 +76,26 @@ func Get(name string) Builder { return nil } -// SubConn represents a gRPC sub connection. -// Each sub connection contains a list of addresses. gRPC will -// try to connect to them (in sequence), and stop trying the -// remainder once one connection is successful. +// A SubConn represents a single connection to a gRPC backend service. // -// The reconnect backoff will be applied on the list, not a single address. -// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// Each SubConn contains a list of addresses. // -// All SubConns start in IDLE, and will not try to connect. To trigger -// the connecting, Balancers must call Connect. -// When the connection encounters an error, it will reconnect immediately. -// When the connection becomes IDLE, it will not reconnect unless Connect is -// called. +// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect. If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. // -// This interface is to be implemented by gRPC. Users should not need a -// brand new implementation of this interface. For the situations like -// testing, the new implementation should embed this interface. This allows -// gRPC to add new methods to this interface. +// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. +// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. type SubConn interface { // UpdateAddresses updates the addresses used in this SubConn. // gRPC checks if currently-connected address is still in the new list. @@ -101,9 +104,17 @@ type SubConn interface { // a new connection will be created. // // This will trigger a state transition for the SubConn. + // + // Deprecated: This method is now part of the ClientConn interface and will + // eventually be removed from here. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) } // NewSubConnOptions contains options to create new SubConn. 
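The SubConn comment above recommends embedding the interface rather than implementing it from scratch; a minimal sketch of that pattern for a test double follows (testSubConn and the balancertest package are hypothetical, not part of this diff):

package balancertest

import "google.golang.org/grpc/balancer"

// testSubConn embeds balancer.SubConn so it keeps compiling even when gRPC
// adds new methods to the interface; only the methods a test exercises are
// overridden. Calling an unoverridden method panics, because the embedded
// interface value is nil, which is the usual tradeoff of this pattern.
type testSubConn struct {
	balancer.SubConn
	connectCalls int
}

// Connect records the attempt instead of dialing a backend.
func (t *testSubConn) Connect() { t.connectCalls++ }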
@@ -143,6 +154,13 @@ type ClientConn interface { // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. RemoveSubConn(SubConn) + // UpdateAddresses updates the addresses used in the passed in SubConn. + // gRPC checks if the currently connected address is still in the new list. + // If so, the connection will be kept. Else, the connection will be + // gracefully closed, and a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has // changed. @@ -162,21 +180,32 @@ type ClientConn interface { // BuildOptions contains additional information for Build. type BuildOptions struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. + // DialCreds is the transport credentials to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. DialCreds credentials.TransportCredentials - // CredsBundle is the credentials bundle that the Balancer can use. + // CredsBundle is the credentials bundle to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. CredsBundle credentials.Bundle - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. + // Dialer is the custom dialer to use when communicating with a remote load + // balancer server. Balancer implementations which do not communicate with a + // remote load balancer server can ignore this field. Dialer func(context.Context, string) (net.Conn, error) - // ChannelzParentID is the entity parent's channelz unique identification number. - ChannelzParentID int64 - // Target contains the parsed address info of the dial target. It is the same resolver.Target as - // passed to the resolver. - // See the documentation for the resolver.Target type for details about what it contains. + // Authority is the server name to use as part of the authentication + // handshake when communicating with a remote load balancer server. Balancer + // implementations which do not communicate with a remote load balancer + // server can ignore this field. + Authority string + // ChannelzParentID is the parent ClientConn's channelz ID. + ChannelzParentID *channelz.Identifier + // CustomUserAgent is the custom user agent set on the parent ClientConn. + // The balancer should set the same custom user agent if it creates a + // ClientConn. + CustomUserAgent string + // Target contains the parsed address info of the dial target. It is the + // same resolver.Target as passed to the resolver. See the documentation for + // the resolver.Target type for details about what it contains. Target resolver.Target } @@ -220,7 +249,7 @@ type DoneInfo struct { // ServerLoad is the load received from server. It's usually sent as part of // trailing metadata. // - // The only supported type now is *orca_v1.LoadReport. + // The only supported type now is *orca_v3.LoadReport. 
ServerLoad interface{} } @@ -250,6 +279,14 @@ type PickResult struct { // type, Done may not be called. May be nil if the balancer does not wish // to be notified when the RPC completes. Done func(DoneInfo) + + // Metadata provides a way for LB policies to inject arbitrary per-call + // metadata. Any metadata returned here will be merged with existing + // metadata added by the client application. + // + // LB policies with child policies are responsible for propagating metadata + // injected by their children to the ClientConn, as part of Pick(). + Metadata metadata.MD } // TransientFailureError returns e. It exists for backward compatibility and @@ -312,6 +349,20 @@ type Balancer interface { Close() } +// ExitIdler is an optional interface for balancers to implement. If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. +// +// Notice: it will be required for all balancers to implement this in a future +// release. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + // SubConnState describes the state of a SubConn. type SubConnState struct { // ConnectivityState is the connectivity state of the SubConn. @@ -334,41 +385,20 @@ type ClientConnState struct { // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") -// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns -// and returns one aggregated connectivity state. -// -// It's not thread safe. -type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as interface{} to avoid a + // dependency cycle. Should also return a close function that will be + // called when all references to the Producer have been given up. + Build(grpcClientConnInterface interface{}) (p Producer, close func()) } -// RecordTransition records state change happening in subConn and based on that -// it evaluates what aggregated state should be. -// -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else the aggregated state is TransientFailure. -// -// Idle and Shutdown are not considered. -func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { - // Update counters. - for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - } - } - - // Evaluate. 
- if cse.numReady > 0 { - return connectivity.Ready - } - if cse.numConnecting > 0 { - return connectivity.Connecting - } - return connectivity.TransientFailure +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer interface { } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/base/balancer.go b/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/base/balancer.go index 32d782f1c..3929c26d3 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -41,10 +41,11 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: make(map[resolver.Address]balancer.SubConn), + subConns: resolver.NewAddressMap(), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, + state: connectivity.Connecting, } // Initialize picker to a picker that always returns // ErrNoSubConnAvailable, because when state of a SubConn changes, we @@ -64,7 +65,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns map[resolver.Address]balancer.SubConn + subConns *resolver.AddressMap scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -75,7 +76,7 @@ type baseBalancer struct { func (b *baseBalancer) ResolverError(err error) { b.resolverErr = err - if len(b.subConns) == 0 { + if b.subConns.Len() == 0 { b.state = connectivity.TransientFailure } @@ -99,26 +100,29 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. - addrsSet := make(map[resolver.Address]struct{}) + addrsSet := resolver.NewAddressMap() for _, a := range s.ResolverState.Addresses { - addrsSet[a] = struct{}{} - if _, ok := b.subConns[a]; !ok { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } - b.subConns[a] = sc + b.subConns.Set(a, sc) b.scStates[sc] = connectivity.Idle + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) sc.Connect() } } - for a, sc := range b.subConns { + for _, a := range b.subConns.Keys() { + sci, _ := b.subConns.Get(a) + sc := sci.(balancer.SubConn) // a was removed by resolver. - if _, ok := addrsSet[a]; !ok { + if _, ok := addrsSet.Get(a); !ok { b.cc.RemoveSubConn(sc) - delete(b.subConns, a) + b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. 
} @@ -131,6 +135,9 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) return nil } @@ -150,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error { // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. The picker is -// - errPicker if the balancer is in TransientFailure, -// - built by the pickerBuilder with all READY SubConns otherwise. +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { b.picker = NewErrPicker(b.mergeErrors()) @@ -160,7 +167,9 @@ func (b *baseBalancer) regeneratePicker() { readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. - for addr, sc := range b.subConns { + for _, addr := range b.subConns.Keys() { + sci, _ := b.subConns.Get(addr) + sc := sci.(balancer.SubConn) if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { readySCs[sc] = SubConnInfo{Address: addr} } @@ -180,10 +189,14 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } return } - if oldS == connectivity.TransientFailure && s == connectivity.Connecting { - // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + if oldS == connectivity.TransientFailure && + (s == connectivity.Connecting || s == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or // CONNECTING transitions to prevent the aggregated state from being // always CONNECTING when many backends exist but are all down. + if s == connectivity.Idle { + sc.Connect() + } return } b.scStates[sc] = s @@ -209,7 +222,6 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su b.state == connectivity.TransientFailure { b.regeneratePicker() } - b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } @@ -218,6 +230,11 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su func (b *baseBalancer) Close() { } +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + // NewErrPicker returns a Picker that always returns err on Pick(). func NewErrPicker(err error) balancer.Picker { return &errPicker{err: err} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go new file mode 100644 index 000000000..c33413581 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package balancer + +import "google.golang.org/grpc/connectivity" + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// +// Shutdown is not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal + } + } + return cse.CurrentState() +} + +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + if cse.numIdle > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go index a24264a34..4ecfa1c21 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -39,7 +39,7 @@ type State struct { // Set returns a copy of the provided state with attributes containing s. s's // data should not be mutated after calling Set. 
func Set(state resolver.State, s *State) resolver.State { - state.Attributes = state.Attributes.WithValues(key, s) + state.Attributes = state.Attributes.WithValue(key, s) return state } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 43c2a1537..f7031ad22 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" @@ -47,11 +47,11 @@ func init() { type rrPickerBuilder struct{} func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("roundrobinPicker: newPicker called with info: %v", info) + logger.Infof("roundrobinPicker: Build called with info: %v", info) if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } - var scs []balancer.SubConn + scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) for sc := range info.ReadySCs { scs = append(scs, sc) } @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: grpcrand.Intn(len(scs)), + next: uint32(grpcrand.Intn(len(scs))), } } @@ -69,15 +69,13 @@ type rrPicker struct { // created. The slice is immutable. Each Get() will do a round robin // selection from it and return the selected SubConn. subConns []balancer.SubConn - - mu sync.Mutex - next int + next uint32 } func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - p.mu.Lock() - sc := p.subConns[p.next] - p.next = (p.next + 1) % len(p.subConns) - p.mu.Unlock() + subConnsLen := uint32(len(p.subConns)) + nextIndex := atomic.AddUint32(&p.next, 1) + + sc := p.subConns[nextIndex%subConnsLen] return balancer.PickResult{SubConn: sc}, nil } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/prometheus-to-sd/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 11e592aab..04b9ad411 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -19,156 +19,343 @@ package grpc import ( + "context" "fmt" + "strings" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} +type ccbMode int + +const ( + ccbModeActive = iota + ccbModeIdle + ccbModeClosed + ccbModeExitingIdle +) -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. 
The ClientConn is free to call these methods
+// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
+// to the Balancer happen synchronously and in order.
+//
+// ccBalancerWrapper also implements the balancer.ClientConn interface and is
+// passed to the Balancer implementations. It invokes unexported methods on the
+// ClientConn to handle these calls from the Balancer.
+//
+// It uses the gracefulswitch.Balancer internally to ensure that balancer
+// switches happen in a graceful manner.
 type ccBalancerWrapper struct {
-	cc         *ClientConn
-	balancerMu sync.Mutex // synchronizes calls to the balancer
-	balancer   balancer.Balancer
-	scBuffer   *buffer.Unbounded
-	done       *grpcsync.Event
+	// The following fields are initialized when the wrapper is created and are
+	// read-only afterwards, and therefore can be accessed without a mutex.
+	cc   *ClientConn
+	opts balancer.BuildOptions
+
+	// Outgoing (gRPC --> balancer) calls are guaranteed to execute in a
+	// mutually exclusive manner as they are scheduled in the serializer. Fields
+	// accessed *only* in these serializer callbacks, can therefore be accessed
+	// without a mutex.
+	balancer        *gracefulswitch.Balancer
+	curBalancerName string
 
-	mu       sync.Mutex
-	subConns map[*acBalancerWrapper]struct{}
+	// mu guards access to the below fields. Access to the serializer and its
+	// cancel function needs to be mutex protected because they are overwritten
+	// when the wrapper exits idle mode.
+	mu               sync.Mutex
+	serializer       *grpcsync.CallbackSerializer // To serialize all outgoing calls.
+	serializerCancel context.CancelFunc           // To close the serializer at close/enterIdle time.
+	mode             ccbMode                      // Tracks the current mode of the wrapper.
 }
 
-func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
+// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
+// is not created until the switchTo() method is invoked.
+func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
+	ctx, cancel := context.WithCancel(context.Background())
 	ccb := &ccBalancerWrapper{
-		cc:       cc,
-		scBuffer: buffer.NewUnbounded(),
-		done:     grpcsync.NewEvent(),
-		subConns: make(map[*acBalancerWrapper]struct{}),
+		cc:               cc,
+		opts:             bopts,
+		serializer:       grpcsync.NewCallbackSerializer(ctx),
+		serializerCancel: cancel,
 	}
-	go ccb.watcher()
-	ccb.balancer = b.Build(ccb, bopts)
+	ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
 	return ccb
 }
 
-// watcher balancer functions sequentially, so the balancer can be implemented
-// lock-free.
-func (ccb *ccBalancerWrapper) watcher() {
-	for {
-		select {
-		case t := <-ccb.scBuffer.Get():
-			ccb.scBuffer.Load()
-			if ccb.done.HasFired() {
-				break
+// updateClientConnState is invoked by grpc to push a ClientConnState update to
+// the underlying balancer.
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+	ccb.mu.Lock()
+	errCh := make(chan error, 1)
+	// Here and everywhere else where Schedule() is called, it is done with the
+	// lock held. But the lock guards only the scheduling part. The actual
+	// callback is called asynchronously without the lock being held.
+	ok := ccb.serializer.Schedule(func(_ context.Context) {
+		// If the addresses specified in the update contain addresses of type
+		// "grpclb" and the selected LB policy is not "grpclb", these addresses
+		// will be filtered out and ccs will be modified with the updated
+		// address list.
+ if ccb.curBalancerName != grpclbName { + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) } - ccb.balancerMu.Lock() - su := t.(*scStateUpdate) - ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) - ccb.balancerMu.Unlock() - case <-ccb.done.Done(): + ccs.ResolverState.Addresses = addrs } + errCh <- ccb.balancer.UpdateClientConnState(*ccs) + }) + if !ok { + // If we are unable to schedule a function with the serializer, it + // indicates that it has been closed. A serializer is only closed when + // the wrapper is closed or is in idle. + ccb.mu.Unlock() + return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") + } + ccb.mu.Unlock() - if ccb.done.HasFired() { - ccb.balancer.Close() - ccb.mu.Lock() - scs := ccb.subConns - ccb.subConns = nil - ccb.mu.Unlock() - for acbw := range scs { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) - } - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) + // We get here only if the above call to Schedule succeeds, in which case it + // is guaranteed that the scheduled function will run. Therefore it is safe + // to block on this channel. + err := <-errCh + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + return err +} + +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) + ccb.mu.Unlock() +} + +func (ccb *ccBalancerWrapper) resolverError(err error) { + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.ResolverError(err) + }) + ccb.mu.Unlock() +} + +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + // TODO: Other languages use case-sensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { return } + ccb.buildLoadBalancingPolicy(name) + }) + ccb.mu.Unlock() +} + +// buildLoadBalancingPolicy performs the following: +// - retrieve a balancer builder for the given name. Use the default LB +// policy, pick_first, if no LB policy with name is found in the registry. +// - instruct the gracefulswitch balancer to switch to the above builder. This +// will actually build the new balancer. +// - update the `curBalancerName` field +// +// Must be called from a serializer callback. 
+func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return } + ccb.curBalancerName = builder.Name() } func (ccb *ccBalancerWrapper) close() { - ccb.done.Fire() -} - -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - // When updating addresses for a SubConn, if the address in use is not in - // the new addresses, the old ac will be tearDown() and a new ac will be - // created. tearDown() generates a state change with Shutdown state, we - // don't want the balancer to receive this state change. So before - // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and - // this function will be called with (nil, Shutdown). We don't need to call - // balancer method in this case. - if sc == nil { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.closeBalancer(ccbModeClosed) +} + +// enterIdleMode is invoked by grpc when the channel enters idle mode upon +// expiry of idle_timeout. This call blocks until the balancer is closed. +func (ccb *ccBalancerWrapper) enterIdleMode() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") + ccb.closeBalancer(ccbModeIdle) +} + +// closeBalancer is invoked when the channel is being closed or when it enters +// idle mode upon expiry of idle_timeout. +func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { + ccb.mu.Unlock() return } - ccb.scBuffer.Put(&scStateUpdate{ - sc: sc, - state: s, - err: err, + + ccb.mode = m + done := ccb.serializer.Done + b := ccb.balancer + ok := ccb.serializer.Schedule(func(_ context.Context) { + // Close the serializer to ensure that no more calls from gRPC are sent + // to the balancer. + ccb.serializerCancel() + // Empty the current balancer name because we don't have a balancer + // anymore and also so that we act on the next call to switchTo by + // creating a new balancer specified by the new resolver. + ccb.curBalancerName = "" }) + if !ok { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-done + // Spawn a goroutine to close the balancer (since it may block trying to + // cleanup all allocated resources) and return early. + go b.Close() } -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - return ccb.balancer.UpdateClientConnState(*ccs) +// exitIdleMode is invoked by grpc when the channel exits idle mode either +// because of an RPC or because of an invocation of the Connect() API. This +// recreates the balancer that was closed previously when entering idle mode. +// +// If the channel is not in idle mode, we know for a fact that we are here as a +// result of the user calling the Connect() method on the ClientConn. 
In this
+// case, we can simply forward the call to the underlying balancer, instructing
+// it to reconnect to the backends.
+func (ccb *ccBalancerWrapper) exitIdleMode() {
+	ccb.mu.Lock()
+	if ccb.mode == ccbModeClosed {
+		// Request to exit idle is a no-op when wrapper is already closed.
+		ccb.mu.Unlock()
+		return
+	}
+
+	if ccb.mode == ccbModeIdle {
+		// Recreate the serializer which was closed when we entered idle.
+		ctx, cancel := context.WithCancel(context.Background())
+		ccb.serializer = grpcsync.NewCallbackSerializer(ctx)
+		ccb.serializerCancel = cancel
+	}
+
+	// The ClientConn guarantees mutual exclusion between close() and
+	// exitIdleMode(), and since we just created a new serializer, we can be
+	// sure that the below function will be scheduled.
+	done := make(chan struct{})
+	ccb.serializer.Schedule(func(_ context.Context) {
+		defer close(done)
+
+		ccb.mu.Lock()
+		defer ccb.mu.Unlock()
+
+		if ccb.mode != ccbModeIdle {
+			ccb.balancer.ExitIdle()
+			return
+		}
+
+		// Gracefulswitch balancer does not support a switchTo operation after
+		// being closed. Hence we need to create a new one here.
+		ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
+		ccb.mode = ccbModeActive
+		channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode")
+
+	})
+	ccb.mu.Unlock()
+
+	<-done
 }
 
-func (ccb *ccBalancerWrapper) resolverError(err error) {
-	ccb.balancerMu.Lock()
-	ccb.balancer.ResolverError(err)
-	ccb.balancerMu.Unlock()
+func (ccb *ccBalancerWrapper) isIdleOrClosed() bool {
+	ccb.mu.Lock()
+	defer ccb.mu.Unlock()
+	return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed
 }
 
 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
-	if len(addrs) <= 0 {
-		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+	if ccb.isIdleOrClosed() {
+		return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle")
 	}
-	ccb.mu.Lock()
-	defer ccb.mu.Unlock()
-	if ccb.subConns == nil {
-		return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
+
+	if len(addrs) == 0 {
+		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
 	}
 	ac, err := ccb.cc.newAddrConn(addrs, opts)
 	if err != nil {
+		channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
 		return nil, err
 	}
-	acbw := &acBalancerWrapper{ac: ac}
-	acbw.ac.mu.Lock()
+	acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
 	ac.acbw = acbw
-	acbw.ac.mu.Unlock()
-	ccb.subConns[acbw] = struct{}{}
 	return acbw, nil
 }
 
 func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	if ccb.isIdleOrClosed() {
+		// It is safe to ignore this call when the balancer is closed or in idle
+		// because the ClientConn takes care of closing the connections.
+ // + // Not returning early from here when the balancer is closed or in idle + // leads to a deadlock though, because of the following sequence of + // calls when holding cc.mu: + // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> + // ccb.RemoveAddrConn --> cc.removeAddrConn + return + } + acbw, ok := sc.(*acBalancerWrapper) if !ok { return } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { + ccb.cc.removeAddrConn(acbw.ac, errConnDrain) +} + +func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + if ccb.isIdleOrClosed() { return } - delete(ccb.subConns, acbw) - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + acbw.UpdateAddresses(addrs) } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { + if ccb.isIdleOrClosed() { return } + // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is @@ -179,6 +366,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + if ccb.isIdleOrClosed() { + return + } + ccb.cc.resolveNow(o) } @@ -189,58 +380,80 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { - mu sync.Mutex - ac *addrConn + ac *addrConn // read-only + + mu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer +} + +func (acbw *acBalancerWrapper) String() string { + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - if len(addrs) <= 0 { - acbw.ac.tearDown(errConnDrain) - return + acbw.ac.updateAddrs(addrs) +} + +func (acbw *acBalancerWrapper) Connect() { + go acbw.ac.connect() +} + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, blocks until it is or ctx expires. Returns an error when the context +// expires or the addrConn is shut down. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err } - if !acbw.ac.tryUpdateAddrs(addrs) { - cc := acbw.ac.cc - opts := acbw.ac.scopts - acbw.ac.mu.Lock() - // Set old ac.acbw to nil so the Shutdown state update will be ignored - // by balancer. - // - // TODO(bar) the state transition could be wrong when tearDown() old ac - // and creating new ac, fix the transition. - acbw.ac.acbw = nil - acbw.ac.mu.Unlock() - acState := acbw.ac.getState() - acbw.ac.tearDown(errConnDrain) - - if acState == connectivity.Shutdown { - return - } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} - ac, err := cc.newAddrConn(addrs, opts) - if err != nil { - channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) - return - } - acbw.ac = ac - ac.mu.Lock() - ac.acbw = acbw - ac.mu.Unlock() - if acState != connectivity.Idle { - ac.connect() - } +// Invoke performs a unary RPC. 
If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) } -func (acbw *acBalancerWrapper) Connect() { - acbw.mu.Lock() - defer acbw.mu.Unlock() - acbw.ac.connect() +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function } -func (acbw *acBalancerWrapper) getAddrConn() *addrConn { +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { acbw.mu.Lock() defer acbw.mu.Unlock() - return acbw.ac + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. + unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/prometheus-to-sd/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index f826ec769..ec2c2fa14 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -1,26 +1,44 @@ +// Copyright 2018 The gRPC Authors +// All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto + // Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - duration "github.com/golang/protobuf/ptypes/duration" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Enumerates the type of event // Note the terminology is different from the RPC semantics @@ -56,34 +74,55 @@ const ( GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 ) -var GrpcLogEntry_EventType_name = map[int32]string{ - 0: "EVENT_TYPE_UNKNOWN", - 1: "EVENT_TYPE_CLIENT_HEADER", - 2: "EVENT_TYPE_SERVER_HEADER", - 3: "EVENT_TYPE_CLIENT_MESSAGE", - 4: "EVENT_TYPE_SERVER_MESSAGE", - 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", - 6: "EVENT_TYPE_SERVER_TRAILER", - 7: "EVENT_TYPE_CANCEL", -} +// Enum value maps for GrpcLogEntry_EventType. 
+var ( + GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", + } + GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, + } +) -var GrpcLogEntry_EventType_value = map[string]int32{ - "EVENT_TYPE_UNKNOWN": 0, - "EVENT_TYPE_CLIENT_HEADER": 1, - "EVENT_TYPE_SERVER_HEADER": 2, - "EVENT_TYPE_CLIENT_MESSAGE": 3, - "EVENT_TYPE_SERVER_MESSAGE": 4, - "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, - "EVENT_TYPE_SERVER_TRAILER": 6, - "EVENT_TYPE_CANCEL": 7, +func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType { + p := new(GrpcLogEntry_EventType) + *p = x + return p } func (x GrpcLogEntry_EventType) String() string { - return proto.EnumName(GrpcLogEntry_EventType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor() } +func (GrpcLogEntry_EventType) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0] +} + +func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead. func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_b7972e58de45083a, []int{0, 0} + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0} } // Enumerates the entity that generates the log entry @@ -95,24 +134,45 @@ const ( GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 ) -var GrpcLogEntry_Logger_name = map[int32]string{ - 0: "LOGGER_UNKNOWN", - 1: "LOGGER_CLIENT", - 2: "LOGGER_SERVER", -} +// Enum value maps for GrpcLogEntry_Logger. +var ( + GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", + } + GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, + } +) -var GrpcLogEntry_Logger_value = map[string]int32{ - "LOGGER_UNKNOWN": 0, - "LOGGER_CLIENT": 1, - "LOGGER_SERVER": 2, +func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger { + p := new(GrpcLogEntry_Logger) + *p = x + return p } func (x GrpcLogEntry_Logger) String() string { - return proto.EnumName(GrpcLogEntry_Logger_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor() +} + +func (GrpcLogEntry_Logger) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1] +} + +func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } +// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead. 
func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_b7972e58de45083a, []int{0, 1} + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1} } type Address_Type int32 @@ -128,32 +188,57 @@ const ( Address_TYPE_UNIX Address_Type = 3 ) -var Address_Type_name = map[int32]string{ - 0: "TYPE_UNKNOWN", - 1: "TYPE_IPV4", - 2: "TYPE_IPV6", - 3: "TYPE_UNIX", -} +// Enum value maps for Address_Type. +var ( + Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", + } + Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, + } +) -var Address_Type_value = map[string]int32{ - "TYPE_UNKNOWN": 0, - "TYPE_IPV4": 1, - "TYPE_IPV6": 2, - "TYPE_UNIX": 3, +func (x Address_Type) Enum() *Address_Type { + p := new(Address_Type) + *p = x + return p } func (x Address_Type) String() string { - return proto.EnumName(Address_Type_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Address_Type) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor() +} + +func (Address_Type) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2] +} + +func (x Address_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } +// Deprecated: Use Address_Type.Descriptor instead. func (Address_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_b7972e58de45083a, []int{7, 0} + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0} } // Log entry we store in binary logs type GrpcLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The timestamp of the binary log message - Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Uniquely identifies a call. The value must not be 0 in order to disambiguate // from an unset value. // Each call may have several log entries, they will all have the same call_id. @@ -166,11 +251,12 @@ type GrpcLogEntry struct { // durability or ordering is not guaranteed. SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` - Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum // The logger uses one of the following fields to record the payload, // according to the type of the log entry. // - // Types that are valid to be assigned to Payload: + // Types that are assignable to Payload: + // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader // *GrpcLogEntry_Message @@ -183,100 +269,76 @@ type GrpcLogEntry struct { // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in // the case of trailers-only. On server side, peer is always // logged on EVENT_TYPE_CLIENT_HEADER. 
- Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` } -func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} } -func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) } -func (*GrpcLogEntry) ProtoMessage() {} -func (*GrpcLogEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_b7972e58de45083a, []int{0} +func (x *GrpcLogEntry) Reset() { + *x = GrpcLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b) +func (x *GrpcLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic) -} -func (m *GrpcLogEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_GrpcLogEntry.Merge(m, src) -} -func (m *GrpcLogEntry) XXX_Size() int { - return xxx_messageInfo_GrpcLogEntry.Size(m) -} -func (m *GrpcLogEntry) XXX_DiscardUnknown() { - xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m) + +func (*GrpcLogEntry) ProtoMessage() {} + +func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo +// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead. 
+func (*GrpcLogEntry) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0} +} -func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp +func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } return nil } -func (m *GrpcLogEntry) GetCallId() uint64 { - if m != nil { - return m.CallId +func (x *GrpcLogEntry) GetCallId() uint64 { + if x != nil { + return x.CallId } return 0 } -func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { - if m != nil { - return m.SequenceIdWithinCall +func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { + if x != nil { + return x.SequenceIdWithinCall } return 0 } -func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType { - if m != nil { - return m.Type +func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType { + if x != nil { + return x.Type } return GrpcLogEntry_EVENT_TYPE_UNKNOWN } -func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { - if m != nil { - return m.Logger +func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { + if x != nil { + return x.Logger } return GrpcLogEntry_LOGGER_UNKNOWN } -type isGrpcLogEntry_Payload interface { - isGrpcLogEntry_Payload() -} - -type GrpcLogEntry_ClientHeader struct { - ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` -} - -type GrpcLogEntry_ServerHeader struct { - ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` -} - -type GrpcLogEntry_Message struct { - Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` -} - -type GrpcLogEntry_Trailer struct { - Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` -} - -func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} - -func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} - -func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} - -func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} - func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { if m != nil { return m.Payload @@ -284,59 +346,82 @@ func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { return nil } -func (m *GrpcLogEntry) GetClientHeader() *ClientHeader { - if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok { +func (x *GrpcLogEntry) GetClientHeader() *ClientHeader { + if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok { return x.ClientHeader } return nil } -func (m *GrpcLogEntry) GetServerHeader() *ServerHeader { - if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok { +func (x *GrpcLogEntry) GetServerHeader() *ServerHeader { + if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok { return x.ServerHeader } return nil } -func (m *GrpcLogEntry) GetMessage() *Message { - if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok { +func (x *GrpcLogEntry) GetMessage() *Message { + if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok { return x.Message } return nil } -func (m *GrpcLogEntry) GetTrailer() *Trailer { - if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok { +func (x *GrpcLogEntry) GetTrailer() *Trailer { + if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok { return x.Trailer } return nil } -func (m *GrpcLogEntry) GetPayloadTruncated() bool { - if m != nil { - return m.PayloadTruncated +func (x *GrpcLogEntry) GetPayloadTruncated() bool { + if x != nil { + return x.PayloadTruncated } return false } -func (m *GrpcLogEntry) GetPeer() *Address { - if m != nil { - return 
m.Peer +func (x *GrpcLogEntry) GetPeer() *Address { + if x != nil { + return x.Peer } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*GrpcLogEntry) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*GrpcLogEntry_ClientHeader)(nil), - (*GrpcLogEntry_ServerHeader)(nil), - (*GrpcLogEntry_Message)(nil), - (*GrpcLogEntry_Trailer)(nil), - } +type isGrpcLogEntry_Payload interface { + isGrpcLogEntry_Payload() } +type GrpcLogEntry_ClientHeader struct { + ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` +} + +type GrpcLogEntry_ServerHeader struct { + ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` +} + +type GrpcLogEntry_Message struct { + // Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE + Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` +} + +type GrpcLogEntry_Trailer struct { + Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` +} + +func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} + type ClientHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // This contains only the metadata from the application. Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // The name of the RPC method, which looks something like: @@ -350,106 +435,122 @@ type ClientHeader struct { // or : . Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` // the RPC timeout - Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` } -func (m *ClientHeader) Reset() { *m = ClientHeader{} } -func (m *ClientHeader) String() string { return proto.CompactTextString(m) } -func (*ClientHeader) ProtoMessage() {} -func (*ClientHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_b7972e58de45083a, []int{1} +func (x *ClientHeader) Reset() { + *x = ClientHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ClientHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClientHeader.Unmarshal(m, b) -} -func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic) -} -func (m *ClientHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientHeader.Merge(m, src) +func (x *ClientHeader) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ClientHeader) XXX_Size() int { - return xxx_messageInfo_ClientHeader.Size(m) -} -func (m *ClientHeader) XXX_DiscardUnknown() { - xxx_messageInfo_ClientHeader.DiscardUnknown(m) + +func (*ClientHeader) ProtoMessage() {} + +func (x *ClientHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ClientHeader proto.InternalMessageInfo +// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead. +func (*ClientHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1} +} -func (m *ClientHeader) GetMetadata() *Metadata { - if m != nil { - return m.Metadata +func (x *ClientHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata } return nil } -func (m *ClientHeader) GetMethodName() string { - if m != nil { - return m.MethodName +func (x *ClientHeader) GetMethodName() string { + if x != nil { + return x.MethodName } return "" } -func (m *ClientHeader) GetAuthority() string { - if m != nil { - return m.Authority +func (x *ClientHeader) GetAuthority() string { + if x != nil { + return x.Authority } return "" } -func (m *ClientHeader) GetTimeout() *duration.Duration { - if m != nil { - return m.Timeout +func (x *ClientHeader) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout } return nil } type ServerHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // This contains only the metadata from the application. - Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` } -func (m *ServerHeader) Reset() { *m = ServerHeader{} } -func (m *ServerHeader) String() string { return proto.CompactTextString(m) } -func (*ServerHeader) ProtoMessage() {} -func (*ServerHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_b7972e58de45083a, []int{2} +func (x *ServerHeader) Reset() { + *x = ServerHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ServerHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServerHeader.Unmarshal(m, b) -} -func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic) -} -func (m *ServerHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServerHeader.Merge(m, src) -} -func (m *ServerHeader) XXX_Size() int { - return xxx_messageInfo_ServerHeader.Size(m) +func (x *ServerHeader) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ServerHeader) XXX_DiscardUnknown() { - xxx_messageInfo_ServerHeader.DiscardUnknown(m) + +func (*ServerHeader) ProtoMessage() {} + +func (x *ServerHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ServerHeader proto.InternalMessageInfo +// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead. 
+func (*ServerHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2} +} -func (m *ServerHeader) GetMetadata() *Metadata { - if m != nil { - return m.Metadata +func (x *ServerHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata } return nil } type Trailer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // This contains only the metadata from the application. Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // The gRPC status code. @@ -459,112 +560,124 @@ type Trailer struct { StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` // The value of the 'grpc-status-details-bin' metadata key. If // present, this is always an encoded 'google.rpc.Status' message. - StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` } -func (m *Trailer) Reset() { *m = Trailer{} } -func (m *Trailer) String() string { return proto.CompactTextString(m) } -func (*Trailer) ProtoMessage() {} -func (*Trailer) Descriptor() ([]byte, []int) { - return fileDescriptor_b7972e58de45083a, []int{3} +func (x *Trailer) Reset() { + *x = Trailer{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Trailer) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Trailer.Unmarshal(m, b) -} -func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Trailer.Marshal(b, m, deterministic) -} -func (m *Trailer) XXX_Merge(src proto.Message) { - xxx_messageInfo_Trailer.Merge(m, src) +func (x *Trailer) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Trailer) XXX_Size() int { - return xxx_messageInfo_Trailer.Size(m) -} -func (m *Trailer) XXX_DiscardUnknown() { - xxx_messageInfo_Trailer.DiscardUnknown(m) + +func (*Trailer) ProtoMessage() {} + +func (x *Trailer) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Trailer proto.InternalMessageInfo +// Deprecated: Use Trailer.ProtoReflect.Descriptor instead. 
+func (*Trailer) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3} +} -func (m *Trailer) GetMetadata() *Metadata { - if m != nil { - return m.Metadata +func (x *Trailer) GetMetadata() *Metadata { + if x != nil { + return x.Metadata } return nil } -func (m *Trailer) GetStatusCode() uint32 { - if m != nil { - return m.StatusCode +func (x *Trailer) GetStatusCode() uint32 { + if x != nil { + return x.StatusCode } return 0 } -func (m *Trailer) GetStatusMessage() string { - if m != nil { - return m.StatusMessage +func (x *Trailer) GetStatusMessage() string { + if x != nil { + return x.StatusMessage } return "" } -func (m *Trailer) GetStatusDetails() []byte { - if m != nil { - return m.StatusDetails +func (x *Trailer) GetStatusDetails() []byte { + if x != nil { + return x.StatusDetails } return nil } // Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Length of the message. It may not be the same as the length of the // data field, as the logging payload can be truncated or omitted. Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` // May be truncated or omitted. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` } -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_b7972e58de45083a, []int{4} +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Message) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Message.Unmarshal(m, b) -} -func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Message.Marshal(b, m, deterministic) -} -func (m *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(m, src) -} -func (m *Message) XXX_Size() int { - return xxx_messageInfo_Message.Size(m) +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Message) XXX_DiscardUnknown() { - xxx_messageInfo_Message.DiscardUnknown(m) + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Message proto.InternalMessageInfo +// Deprecated: Use Message.ProtoReflect.Descriptor instead. 
+func (*Message) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4} +} -func (m *Message) GetLength() uint32 { - if m != nil { - return m.Length +func (x *Message) GetLength() uint32 { + if x != nil { + return x.Length } return 0 } -func (m *Message) GetData() []byte { - if m != nil { - return m.Data +func (x *Message) GetData() []byte { + if x != nil { + return x.Data } return nil } @@ -577,12 +690,12 @@ func (m *Message) GetData() []byte { // Header keys added by gRPC are omitted. To be more specific, // implementations will not log the following entries, and this is // not to be treated as a truncation: -// - entries handled by grpc that are not user visible, such as those -// that begin with 'grpc-' (with exception of grpc-trace-bin) -// or keys like 'lb-token' -// - transport specific entries, including but not limited to: -// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc -// - entries added for call credentials +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials // // Implementations must always log grpc-trace-bin if it is present. // Practically speaking it will only be visible on server side because @@ -591,222 +704,480 @@ func (m *Message) GetData() []byte { // header is just a normal metadata key. // The pair will not count towards the size limit. type Metadata struct { - Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_b7972e58de45083a, []int{5} + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` } -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metadata.Unmarshal(m, b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) -} -func (m *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(m, src) +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Metadata) XXX_Size() int { - return xxx_messageInfo_Metadata.Size(m) + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Metadata proto.InternalMessageInfo 
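The rewrite repeated for every message in this file — XXX_Unmarshal/XXX_Marshal methods and xxx_messageInfo_* variables giving way to ProtoReflect and rawDescGZIP — is the standard regeneration from protoc-gen-go v1 to the google.golang.org/protobuf (APIv2) runtime. Callers that stay on the proto package API need no changes; a minimal usage sketch of the regenerated bindings (field values invented for illustration):

```go
package main

import (
	"fmt"
	"log"

	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	entry := &pb.GrpcLogEntry{
		Timestamp: timestamppb.Now(),
		CallId:    1,
		Type:      pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Logger:    pb.GrpcLogEntry_LOGGER_CLIENT,
		Payload: &pb.GrpcLogEntry_ClientHeader{
			ClientHeader: &pb.ClientHeader{MethodName: "/pkg.Service/Method"},
		},
	}
	// proto.Marshal dispatches through the generated ProtoReflect method;
	// none of the removed XXX_* helpers are needed.
	b, err := proto.Marshal(entry)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("marshalled %d bytes, type=%v\n", len(b), entry.GetType())
}
```

This is also why the generated Descriptor methods are marked Deprecated rather than removed: application code rarely calls them, but they keep old callers compiling while the reflection path does the real work.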
+// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. +func (*Metadata) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5} +} -func (m *Metadata) GetEntry() []*MetadataEntry { - if m != nil { - return m.Entry +func (x *Metadata) GetEntry() []*MetadataEntry { + if x != nil { + return x.Entry } return nil } // A metadata key value pair type MetadataEntry struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *MetadataEntry) Reset() { *m = MetadataEntry{} } -func (m *MetadataEntry) String() string { return proto.CompactTextString(m) } -func (*MetadataEntry) ProtoMessage() {} -func (*MetadataEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_b7972e58de45083a, []int{6} + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -func (m *MetadataEntry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetadataEntry.Unmarshal(m, b) -} -func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic) -} -func (m *MetadataEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetadataEntry.Merge(m, src) +func (x *MetadataEntry) Reset() { + *x = MetadataEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *MetadataEntry) XXX_Size() int { - return xxx_messageInfo_MetadataEntry.Size(m) + +func (x *MetadataEntry) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *MetadataEntry) XXX_DiscardUnknown() { - xxx_messageInfo_MetadataEntry.DiscardUnknown(m) + +func (*MetadataEntry) ProtoMessage() {} + +func (x *MetadataEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo +// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead. 
+func (*MetadataEntry) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6} +} -func (m *MetadataEntry) GetKey() string { - if m != nil { - return m.Key +func (x *MetadataEntry) GetKey() string { + if x != nil { + return x.Key } return "" } -func (m *MetadataEntry) GetValue() []byte { - if m != nil { - return m.Value +func (x *MetadataEntry) GetValue() []byte { + if x != nil { + return x.Value } return nil } // Address information type Address struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` // only for TYPE_IPV4 and TYPE_IPV6 - IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` } -func (m *Address) Reset() { *m = Address{} } -func (m *Address) String() string { return proto.CompactTextString(m) } -func (*Address) ProtoMessage() {} -func (*Address) Descriptor() ([]byte, []int) { - return fileDescriptor_b7972e58de45083a, []int{7} +func (x *Address) Reset() { + *x = Address{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Address) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Address.Unmarshal(m, b) +func (x *Address) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Address.Marshal(b, m, deterministic) -} -func (m *Address) XXX_Merge(src proto.Message) { - xxx_messageInfo_Address.Merge(m, src) -} -func (m *Address) XXX_Size() int { - return xxx_messageInfo_Address.Size(m) -} -func (m *Address) XXX_DiscardUnknown() { - xxx_messageInfo_Address.DiscardUnknown(m) + +func (*Address) ProtoMessage() {} + +func (x *Address) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Address proto.InternalMessageInfo +// Deprecated: Use Address.ProtoReflect.Descriptor instead. 
+func (*Address) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7} +} -func (m *Address) GetType() Address_Type { - if m != nil { - return m.Type +func (x *Address) GetType() Address_Type { + if x != nil { + return x.Type } return Address_TYPE_UNKNOWN } -func (m *Address) GetAddress() string { - if m != nil { - return m.Address +func (x *Address) GetAddress() string { + if x != nil { + return x.Address } return "" } -func (m *Address) GetIpPort() uint32 { - if m != nil { - return m.IpPort +func (x *Address) GetIpPort() uint32 { + if x != nil { + return x.IpPort } return 0 } -func init() { - proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) - proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) - proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) - proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry") - proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader") - proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader") - proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer") - proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message") - proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata") - proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry") - proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address") -} - -func init() { proto.RegisterFile("grpc/binlog/v1/binarylog.proto", fileDescriptor_b7972e58de45083a) } - -var fileDescriptor_b7972e58de45083a = []byte{ - // 904 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44, - 0x10, 0xae, 0xdb, 0x34, 0x6e, 0x26, 0x49, 0xe5, 0xae, 0xca, 0x9d, 0xaf, 0x94, 0x6b, 0x64, 0x09, - 0x14, 0x84, 0xe4, 0xa8, 0x29, 0xd7, 0xe3, 0x05, 0xa4, 0x24, 0xf5, 0xa5, 0x11, 0xb9, 0x34, 0xda, - 0xe4, 0x7a, 0x80, 0x90, 0xac, 0x6d, 0xbc, 0x38, 0x16, 0x8e, 0xd7, 0xac, 0x37, 0x41, 0xf9, 0x59, - 0xbc, 0x21, 0xdd, 0xef, 0xe2, 0x1d, 0x79, 0xd7, 0x4e, 0x4d, 0xd3, 0x82, 0xc4, 0xbd, 0xed, 0x7c, - 0xf3, 0xcd, 0x37, 0xbb, 0xe3, 0x99, 0x31, 0xbc, 0xf4, 0x79, 0x3c, 0x6b, 0xdd, 0x05, 0x51, 0xc8, - 0xfc, 0xd6, 0xea, 0x3c, 0x3d, 0x11, 0xbe, 0x0e, 0x99, 0x6f, 0xc7, 0x9c, 0x09, 0x86, 0x8e, 0x52, - 0xbf, 0x7d, 0x8f, 0xae, 0xce, 0x4f, 0x5e, 0xfa, 0x8c, 0xf9, 0x21, 0x6d, 0x49, 0xc2, 0xdd, 0xf2, - 0x97, 0x96, 0xb7, 0xe4, 0x44, 0x04, 0x2c, 0x52, 0x21, 0x27, 0x67, 0x0f, 0xfd, 0x22, 0x58, 0xd0, - 0x44, 0x90, 0x45, 0xac, 0x08, 0xd6, 0x07, 0x1d, 0x6a, 0x7d, 0x1e, 0xcf, 0x86, 0xcc, 0x77, 0x22, - 0xc1, 0xd7, 0xe8, 0x1b, 0xa8, 0x6c, 0x38, 0xa6, 0xd6, 0xd0, 0x9a, 0xd5, 0xf6, 0x89, 0xad, 0x54, - 0xec, 0x5c, 0xc5, 0x9e, 0xe6, 0x0c, 0x7c, 0x4f, 0x46, 0xcf, 0x41, 0x9f, 0x91, 0x30, 0x74, 0x03, - 0xcf, 0xdc, 0x6d, 0x68, 0xcd, 0x12, 0x2e, 0xa7, 0xe6, 0xc0, 0x43, 0xaf, 0xe0, 0x79, 0x42, 0x7f, - 0x5b, 0xd2, 0x68, 0x46, 0xdd, 0xc0, 0x73, 0x7f, 0x0f, 0xc4, 0x3c, 0x88, 0xdc, 0xd4, 0x69, 0xee, - 0x49, 0xe2, 0x71, 0xee, 0x1e, 0x78, 0xef, 0xa5, 0xb3, 0x47, 0xc2, 0x10, 0x7d, 0x0b, 0x25, 0xb1, - 0x8e, 0xa9, 0x59, 0x6a, 0x68, 0xcd, 0xc3, 0xf6, 0x97, 0xf6, 0xd6, 0xeb, 0xed, 0xe2, 0xc5, 0x6d, - 0x67, 0x45, 0x23, 0x31, 0x5d, 0xc7, 0x14, 0xcb, 0x30, 0xf4, 0x1d, 0x94, 0x43, 0xe6, 0xfb, 0x94, - 0x9b, 0xfb, 0x52, 0xe0, 0x8b, 0xff, 0x12, 0x18, 0x4a, 0x36, 0xce, 0xa2, 0xd0, 0x1b, 0xa8, 0xcf, - 0xc2, 0x80, 0x46, 0xc2, 
0x9d, 0x53, 0xe2, 0x51, 0x6e, 0x96, 0x65, 0x31, 0xce, 0x1e, 0x91, 0xe9, - 0x49, 0xde, 0xb5, 0xa4, 0x5d, 0xef, 0xe0, 0xda, 0xac, 0x60, 0xa7, 0x3a, 0x09, 0xe5, 0x2b, 0xca, - 0x73, 0x1d, 0xfd, 0x49, 0x9d, 0x89, 0xe4, 0xdd, 0xeb, 0x24, 0x05, 0x1b, 0x5d, 0x82, 0xbe, 0xa0, - 0x49, 0x42, 0x7c, 0x6a, 0x1e, 0xe4, 0x9f, 0x65, 0x4b, 0xe1, 0xad, 0x62, 0x5c, 0xef, 0xe0, 0x9c, - 0x9c, 0xc6, 0x09, 0x4e, 0x82, 0x90, 0x72, 0xb3, 0xf2, 0x64, 0xdc, 0x54, 0x31, 0xd2, 0xb8, 0x8c, - 0x8c, 0xbe, 0x82, 0xa3, 0x98, 0xac, 0x43, 0x46, 0x3c, 0x57, 0xf0, 0x65, 0x34, 0x23, 0x82, 0x7a, - 0x26, 0x34, 0xb4, 0xe6, 0x01, 0x36, 0x32, 0xc7, 0x34, 0xc7, 0x91, 0x0d, 0xa5, 0x98, 0x52, 0x6e, - 0x56, 0x9f, 0xcc, 0xd0, 0xf1, 0x3c, 0x4e, 0x93, 0x04, 0x4b, 0x9e, 0xf5, 0x97, 0x06, 0x95, 0xcd, - 0x07, 0x43, 0xcf, 0x00, 0x39, 0xb7, 0xce, 0x68, 0xea, 0x4e, 0x7f, 0x1c, 0x3b, 0xee, 0xbb, 0xd1, - 0xf7, 0xa3, 0x9b, 0xf7, 0x23, 0x63, 0x07, 0x9d, 0x82, 0x59, 0xc0, 0x7b, 0xc3, 0x41, 0x7a, 0xbe, - 0x76, 0x3a, 0x57, 0x0e, 0x36, 0xb4, 0x07, 0xde, 0x89, 0x83, 0x6f, 0x1d, 0x9c, 0x7b, 0x77, 0xd1, - 0x67, 0xf0, 0x62, 0x3b, 0xf6, 0xad, 0x33, 0x99, 0x74, 0xfa, 0x8e, 0xb1, 0xf7, 0xc0, 0x9d, 0x05, - 0xe7, 0xee, 0x12, 0x6a, 0xc0, 0xe9, 0x23, 0x99, 0x3b, 0xc3, 0x37, 0x6e, 0x6f, 0x78, 0x33, 0x71, - 0x8c, 0xfd, 0xc7, 0x05, 0xa6, 0xb8, 0x33, 0x18, 0x3a, 0xd8, 0x28, 0xa3, 0x4f, 0xe0, 0xa8, 0x28, - 0xd0, 0x19, 0xf5, 0x9c, 0xa1, 0xa1, 0x5b, 0x5d, 0x28, 0xab, 0x36, 0x43, 0x08, 0x0e, 0x87, 0x37, - 0xfd, 0xbe, 0x83, 0x0b, 0xef, 0x3d, 0x82, 0x7a, 0x86, 0xa9, 0x8c, 0x86, 0x56, 0x80, 0x54, 0x0a, - 0x63, 0xb7, 0x5b, 0x01, 0x3d, 0xab, 0xbf, 0xf5, 0x41, 0x83, 0x5a, 0xb1, 0xf9, 0xd0, 0x6b, 0x38, - 0x58, 0x50, 0x41, 0x3c, 0x22, 0x48, 0x36, 0xbc, 0x9f, 0x3e, 0xda, 0x25, 0x8a, 0x82, 0x37, 0x64, - 0x74, 0x06, 0xd5, 0x05, 0x15, 0x73, 0xe6, 0xb9, 0x11, 0x59, 0x50, 0x39, 0xc0, 0x15, 0x0c, 0x0a, - 0x1a, 0x91, 0x05, 0x45, 0xa7, 0x50, 0x21, 0x4b, 0x31, 0x67, 0x3c, 0x10, 0x6b, 0x39, 0xb6, 0x15, - 0x7c, 0x0f, 0xa0, 0x0b, 0xd0, 0xd3, 0x45, 0xc0, 0x96, 0x42, 0x8e, 0x6b, 0xb5, 0xfd, 0x62, 0x6b, - 0x67, 0x5c, 0x65, 0x9b, 0x09, 0xe7, 0x4c, 0xab, 0x0f, 0xb5, 0x62, 0xc7, 0xff, 0xef, 0xcb, 0x5b, - 0x7f, 0x68, 0xa0, 0x67, 0x1d, 0xfc, 0x51, 0x15, 0x48, 0x04, 0x11, 0xcb, 0xc4, 0x9d, 0x31, 0x4f, - 0x55, 0xa0, 0x8e, 0x41, 0x41, 0x3d, 0xe6, 0x51, 0xf4, 0x39, 0x1c, 0x66, 0x84, 0x7c, 0x0e, 0x55, - 0x19, 0xea, 0x0a, 0xcd, 0x46, 0xaf, 0x40, 0xf3, 0xa8, 0x20, 0x41, 0x98, 0xc8, 0x8a, 0xd4, 0x72, - 0xda, 0x95, 0x02, 0xad, 0x57, 0xa0, 0xe7, 0x11, 0xcf, 0xa0, 0x1c, 0xd2, 0xc8, 0x17, 0x73, 0x79, - 0xe1, 0x3a, 0xce, 0x2c, 0x84, 0xa0, 0x24, 0x9f, 0xb1, 0x2b, 0xe3, 0xe5, 0xd9, 0xea, 0xc2, 0x41, - 0x7e, 0x77, 0x74, 0x09, 0xfb, 0x34, 0xdd, 0x5c, 0xa6, 0xd6, 0xd8, 0x6b, 0x56, 0xdb, 0x8d, 0x7f, - 0x79, 0xa7, 0xdc, 0x70, 0x58, 0xd1, 0xad, 0xd7, 0x50, 0xff, 0x07, 0x8e, 0x0c, 0xd8, 0xfb, 0x95, - 0xae, 0x65, 0xf6, 0x0a, 0x4e, 0x8f, 0xe8, 0x18, 0xf6, 0x57, 0x24, 0x5c, 0xd2, 0x2c, 0xb7, 0x32, - 0xac, 0x3f, 0x35, 0xd0, 0xb3, 0x39, 0x46, 0x17, 0xd9, 0x76, 0xd6, 0xe4, 0x72, 0x3d, 0x7b, 0x7a, - 0xe2, 0xed, 0xc2, 0x4e, 0x36, 0x41, 0x27, 0x0a, 0xcd, 0x3a, 0x2c, 0x37, 0xd3, 0x9f, 0x47, 0x10, - 0xbb, 0x31, 0xe3, 0x42, 0x56, 0xb5, 0x8e, 0xcb, 0x41, 0x3c, 0x66, 0x5c, 0x58, 0x0e, 0x94, 0xe4, - 0x8e, 0x30, 0xa0, 0xf6, 0x60, 0x3b, 0xd4, 0xa1, 0x22, 0x91, 0xc1, 0xf8, 0xf6, 0x6b, 0x43, 0x2b, - 0x9a, 0x97, 0xc6, 0xee, 0xc6, 0x7c, 0x37, 0x1a, 0xfc, 0x60, 0xec, 0x75, 0x7f, 0x86, 0xe3, 0x80, - 0x6d, 0x5f, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, 0xda, 0x4f, 0xed, - 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 
0xf7, 0x5b, 0xf9, 0x7f, 0x59, 0x85, 0x49, 0xd3, - 0xdd, 0x98, 0xee, 0xea, 0xfc, 0xae, 0x2c, 0xbb, 0xfc, 0xe2, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x10, 0x93, 0x68, 0x41, 0xc2, 0x07, 0x00, 0x00, +var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor + +var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, + 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, + 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, + 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 
0x00, + 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, + 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, + 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, + 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, + 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, + 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, + 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, + 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, + 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, + 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, + 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once + file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc +) + +func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { + file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { + file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) + }) + return file_grpc_binlog_v1_binarylog_proto_rawDescData +} + +var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ + (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType + (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger + (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type + (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry + (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader + (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader + (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer + (*Message)(nil), // 7: grpc.binarylog.v1.Message + (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata + (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry + (*Address)(nil), // 10: grpc.binarylog.v1.Address + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration +} +var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{ + 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp + 0, // 1: 
grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType + 1, // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger + 4, // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader + 5, // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader + 7, // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message + 6, // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer + 10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address + 8, // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration + 8, // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 8, // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata + 9, // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry + 2, // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_grpc_binlog_v1_binarylog_proto_init() } +func file_grpc_binlog_v1_binarylog_proto_init() { + if File_grpc_binlog_v1_binarylog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Trailer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, + NumEnums: 3, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_binlog_v1_binarylog_proto_goTypes, + DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs, + EnumInfos: file_grpc_binlog_v1_binarylog_proto_enumTypes, + MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, + }.Build() + File_grpc_binlog_v1_binarylog_proto = out.File + file_grpc_binlog_v1_binarylog_proto_rawDesc = nil + file_grpc_binlog_v1_binarylog_proto_goTypes = nil + file_grpc_binlog_v1_binarylog_proto_depIdxs = nil } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/call.go b/prometheus-to-sd/vendor/google.golang.org/grpc/call.go index 9e20e4d38..e6a1dc5d7 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/call.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/call.go @@ -27,6 +27,11 @@ import ( // // All errors returned by Invoke are compatible with the status package. func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + if err := cc.idlenessMgr.onCallBegin(); err != nil { + return err + } + defer cc.idlenessMgr.onCallEnd() + // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/channelz/channelz.go b/prometheus-to-sd/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 000000000..32b7fa579 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// # Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. 
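For context on the call.go hunk above: every unary RPC funnels through ClientConn.Invoke, so the new onCallBegin/onCallEnd pair is what keeps a channel out of idle while calls are in flight. A minimal caller-side sketch, assuming the experimental grpc.WithIdleTimeout dial option that accompanies the idleness manager in this release (the option itself is not in this hunk; the address and timeout values are placeholders):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// A channel with a 5-minute idle timeout; a value of 0 disables idleness.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(5*time.Minute))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Invoke, used by every generated unary stub, is bracketed by
	// onCallBegin/onCallEnd, resetting the idle timer while the RPC runs.
	in := &healthpb.HealthCheckRequest{}
	out := &healthpb.HealthCheckResponse{}
	if err := conn.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out); err != nil {
		log.Printf("RPC failed: %v", err)
	}
}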
+type Identifier = channelz.Identifier diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/clientconn.go b/prometheus-to-sd/vendor/google.golang.org/grpc/clientconn.go index ae5ce4947..95a7459b0 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/clientconn.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/clientconn.go @@ -23,8 +23,7 @@ import ( "errors" "fmt" "math" - "net" - "reflect" + "net/url" "strings" "sync" "sync/atomic" @@ -38,7 +37,7 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -48,6 +47,7 @@ import ( _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. + _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. ) const ( @@ -68,6 +68,9 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") + // errConnIdling indicates that the connection is being closed as the channel + // is moving to an idle mode due to inactivity. + errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -78,17 +81,17 @@ var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. - errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") - // errTransportCredentialsMissing indicates that users want to transmit security - // information (e.g., OAuth2 token) which requires secure connection on an insecure - // connection. + // errNoTransportCredsInBundle indicates that the configured creds bundle + // returned a transport credentials which was nil. + errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials") + // errTransportCredentialsMissing indicates that users want to transmit + // security information (e.g., OAuth2 token) which requires secure + // connection on an insecure connection. errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") - // errCredentialsConflict indicates that grpc.WithTransportCredentials() - // and grpc.WithInsecure() are both called for a connection.
- errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") ) const ( @@ -104,6 +107,17 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { return DialContext(context.Background(), target, opts...) } +type defaultConfigSelector struct { + sc *ServiceConfig +} + +func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) { + return &iresolver.RPCConfig{ + Context: rpcInfo.Context, + MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method), + }, nil +} + // DialContext creates a client connection to the given target. By default, it's // a non-blocking dial (the function won't wait for connections to be // established, and connecting happens in the background). To make it a blocking @@ -122,16 +136,43 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ - target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - blockingpicker: newPickerWrapper(), - czData: new(channelzData), - firstResolveEvent: grpcsync.NewEvent(), - } + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + czData: new(channelzData), + } + + // We start the channel off in idle mode, but kick it out of idle at the end + // of this method, instead of waiting for the first RPC. Other gRPC + // implementations do wait for the first RPC to kick the channel out of + // idle. But doing so would be a major behavior change for our users who are + // used to seeing the channel active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, if at all we ever get to do that. + cc.idlenessState = ccIdlenessStateIdle + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + cc.exitIdleCond = sync.NewCond(&cc.mu) + + disableGlobalOpts := false + for _, opt := range opts { + if _, ok := opt.(*disableGlobalDialOptions); ok { + disableGlobalOpts = true + break + } + } + + if !disableGlobalOpts { + for _, opt := range globalDialOptions { + opt.apply(&cc.dopts) + } + } for _, opt := range opts { opt.apply(&cc.dopts) @@ -146,40 +187,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtINFO, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtINFO, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(logger, cc.channelzID, "Channel Created") - } - cc.csMgr.channelzID = cc.channelzID - } + // Register ClientConn with channelz. 
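The rewritten errNoTransportSecurity message above points users at the replacement for the deprecated grpc.WithInsecure(). A minimal sketch of that migration (the address is a placeholder):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Before (deprecated): grpc.Dial("localhost:50051", grpc.WithInsecure())
	// After: pass explicit insecure transport credentials.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}

The same checks now live in validateTransportCredentials further down in this file, which also rejects per-RPC credentials that require transport security when the channel itself is insecure.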
+ cc.channelzRegistration(target) - if !cc.dopts.insecure { - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - } else { - if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { - return nil, errCredentialsConflict - } - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return nil, errTransportCredentialsMissing - } - } + if err := cc.validateTransportCredentials(); err != nil { + return nil, err } if cc.dopts.defaultServiceConfigRawJSON != nil { @@ -191,16 +203,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.mkp = cc.dopts.copts.KeepaliveParams - if cc.dopts.copts.Dialer == nil { - cc.dopts.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) { - network, addr := parseDialTarget(addr) - return (&net.Dialer{}).DialContext(ctx, network, addr) - } - if cc.dopts.withProxy { - cc.dopts.copts.Dialer = newProxyDialer(cc.dopts.copts.Dialer) - } - } - if cc.dopts.copts.UserAgent != "" { cc.dopts.copts.UserAgent += " " + grpcUA } else { @@ -227,61 +229,25 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - scSet := false - if cc.dopts.scChan != nil { - // Try to get an initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - scSet = true - } - default: - } - } if cc.dopts.bs == nil { cc.dopts.bs = backoff.DefaultExponential } // Determine the resolver to use. - cc.parsedTarget = grpcutil.ParseTarget(cc.target) - unixScheme := strings.HasPrefix(cc.target, "unix:") - channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) - resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - // If resolver builder is still nil, the parsed target's scheme is - // not registered. Fallback to default resolver and set Endpoint to - // the original target. - channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) - cc.parsedTarget = resolver.Target{ - Scheme: resolver.GetDefaultScheme(), - Endpoint: target, - } - resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) - } + if err := cc.parseTargetAndFindResolver(); err != nil { + return nil, err } - - creds := cc.dopts.copts.TransportCredentials - if creds != nil && creds.Info().ServerName != "" { - cc.authority = creds.Info().ServerName - } else if cc.dopts.insecure && cc.dopts.authority != "" { - cc.authority = cc.dopts.authority - } else if unixScheme { - cc.authority = "localhost" - } else { - // Use endpoint from "scheme://authority/endpoint" as the default - // authority for ClientConn. - cc.authority = cc.parsedTarget.Endpoint + if err = cc.determineAuthority(); err != nil { + return nil, err } - if cc.dopts.scChan != nil && !scSet { + if cc.dopts.scChan != nil { // Blocking wait for the initial service config. 
select { case sc, ok := <-cc.dopts.scChan: if ok { cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) } case <-ctx.Done(): return nil, ctx.Err() @@ -291,54 +257,224 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * go cc.scWatcher() } + // This creates the name resolver, load balancer, blocking picker etc. + if err := cc.exitIdleMode(); err != nil { + return nil, err + } + + // Configure idleness support with configured idle timeout or default idle + // timeout duration. Idleness can be explicitly disabled by the user, by + // setting the dial option to 0. + cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) + + // Return early for non-blocking dials. + if !cc.dopts.block { + return cc, nil + } + + // A blocking dial blocks until the clientConn is ready. + for { + s := cc.GetState() + if s == connectivity.Idle { + cc.Connect() + } + if s == connectivity.Ready { + return cc, nil + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } + } +} + +// addTraceEvent is a helper method to add a trace event on the channel. If the +// channel is a nested one, the same event is also added on the parent channel. +func (cc *ClientConn) addTraceEvent(msg string) { + ted := &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel %s", msg), + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) +} + +// exitIdleMode moves the channel out of idle mode by recreating the name +// resolver and load balancer. +func (cc *ClientConn) exitIdleMode() error { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return errConnClosing + } + if cc.idlenessState != ccIdlenessStateIdle { + cc.mu.Unlock() + logger.Info("ClientConn asked to exit idle mode when not in idle mode") + return nil + } + + defer func() { + // When Close() and exitIdleMode() race against each other, one of the + // following two can happen: + // - Close() wins the race and runs first. exitIdleMode() runs after, and + // sees that the ClientConn is already closed and hence returns early. + // - exitIdleMode() wins the race and runs first and recreates the balancer + // and releases the lock before recreating the resolver. If Close() runs + // in this window, it will wait for exitIdleMode to complete. + // + // We achieve this synchronization using the below condition variable. 
+ cc.mu.Lock() + cc.idlenessState = ccIdlenessStateActive + cc.exitIdleCond.Signal() + cc.mu.Unlock() + }() + + cc.idlenessState = ccIdlenessStateExitingIdle + exitedIdle := false + if cc.blockingpicker == nil { + cc.blockingpicker = newPickerWrapper() + } else { + cc.blockingpicker.exitIdleMode() + exitedIdle = true + } + var credsClone credentials.TransportCredentials if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerBuildOpts = balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, + if cc.balancerWrapper == nil { + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }) + } else { + cc.balancerWrapper.exitIdleMode() } + cc.firstResolveEvent = grpcsync.NewEvent() + cc.mu.Unlock() - // Build the resolver. - rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %v", err) + // This needs to be called without cc.mu because this builds a new resolver + // which might update state or report error inline which needs to be handled + // by cc.updateResolverState() which also grabs cc.mu. + if err := cc.initResolverWrapper(credsClone); err != nil { + return err } + + if exitedIdle { + cc.addTraceEvent("exiting idle mode") + } + return nil +} + +// enterIdleMode puts the channel in idle mode, and as part of it shuts down the +// name resolver, load balancer and any subchannels. +func (cc *ClientConn) enterIdleMode() error { cc.mu.Lock() - cc.resolverWrapper = rWrapper + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + if cc.idlenessState != ccIdlenessStateActive { + logger.Error("ClientConn asked to enter idle mode when not active") + return nil + } + + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + conns := cc.conns + cc.conns = make(map[*addrConn]struct{}) + + // TODO: Currently, we close the resolver wrapper upon entering idle mode + // and create a new one upon exiting idle mode. This means that the + // `cc.resolverWrapper` field would be overwritten every time we exit idle + // mode. While this means that we need to hold `cc.mu` when accessing + // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should + // try to do the same for the balancer and picker wrappers too. + cc.resolverWrapper.close() + cc.blockingpicker.enterIdleMode() + cc.balancerWrapper.enterIdleMode() + cc.csMgr.updateState(connectivity.Idle) + cc.idlenessState = ccIdlenessStateIdle cc.mu.Unlock() - // A blocking dial blocks until the clientConn is ready. - if cc.dopts.block { - for { - s := cc.GetState() - if s == connectivity.Ready { - break - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled.
- if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { - return nil, err - } - return nil, ctx.Err() + go func() { + cc.addTraceEvent("entering idle mode") + for ac := range conns { + ac.tearDown(errConnIdling) + } + }() + return nil +} + +// validateTransportCredentials performs a series of checks on the configured +// transport credentials. It returns a non-nil error if any of these conditions +// are met: +// - no transport creds and no creds bundle is configured +// - both transport creds and creds bundle are configured +// - creds bundle is configured, but it lacks a transport credentials +// - insecure transport creds configured alongside call creds that require +// transport level security +// +// If none of the above conditions are met, the configured credentials are +// deemed valid and a nil error is returned. +func (cc *ClientConn) validateTransportCredentials() error { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return errTransportCredentialsMissing } } } + return nil +} - return cc, nil +// channelzRegistration registers the newly created ClientConn with channelz and +// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. +// A channelz trace event is emitted for ClientConn creation. If the newly +// created ClientConn is a nested one, i.e a valid parent ClientConn ID is +// specified via a dial option, the trace event is also added to the parent. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) channelzRegistration(target string) { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + cc.addTraceEvent("created") + cc.csMgr.channelzID = cc.channelzID } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -409,7 +545,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 + channelzID *channelz.Identifier } // updateState updates the connectivity.State of ClientConn. @@ -475,40 +611,70 @@ var _ ClientConnInterface = (*ClientConn)(nil) // handshakes. It also handles errors on established connections by // re-resolving the name and reconnecting. type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper - - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. 
- mkp keepalive.ClientParameters - curBalancerName string - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + idlenessMgr idlenessManager + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper + safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. + + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. firstResolveEvent *grpcsync.Event - channelzID int64 // channelz unique identification number - czData *channelzData + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + idlenessState ccIdlenessState // Tracks idleness state of the channel. + exitIdleCond *sync.Cond // Signalled when channel exits idle. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } +// ccIdlenessState tracks the idleness state of the channel. +// +// Channels start off in `active` and move to `idle` after a period of +// inactivity. When moving back to `active` upon an incoming RPC, they +// transition through `exiting_idle`. This state is useful for synchronization +// with Close(). +// +// This state tracking is mostly for self-protection. The idlenessManager is +// expected to keep track of the state as well, and is expected not to call into +// the ClientConn unnecessarily. +type ccIdlenessState int8 + +const ( + ccIdlenessStateActive ccIdlenessState = iota + ccIdlenessStateIdle + ccIdlenessStateExitingIdle +) + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { ch := cc.csMgr.getNotifyChan() if cc.csMgr.getState() != sourceState { @@ -523,11 +689,30 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec } // GetState returns the connectivity.State of ClientConn. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. 
func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } +// Connect causes all subchannels in the ClientConn to attempt to connect if +// the channel is idle. Does not wait for the connection attempts to begin +// before returning. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) Connect() { + cc.exitIdleMode() + // If the ClientConn was not in idle mode, we need to call ExitIdle on the + // LB policy so that connections can be created. + cc.balancerWrapper.exitIdleMode() +} + func (cc *ClientConn) scWatcher() { for { select { @@ -539,6 +724,7 @@ func (cc *ClientConn) scWatcher() { // TODO: load balance policy runtime change is ignored. // We may revisit this decision in the future. cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) cc.mu.Unlock() case <-cc.ctx.Done(): return @@ -577,13 +763,13 @@ func init() { func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { if cc.sc != nil { - cc.applyServiceConfigAndBalancer(cc.sc, addrs) + cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs) return } if cc.dopts.defaultServiceConfig != nil { - cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs) + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs) } else { - cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs) + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs) } } @@ -604,9 +790,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // with the new addresses. cc.maybeApplyDefaultServiceConfig(nil) - if cc.balancerWrapper != nil { - cc.balancerWrapper.resolverError(err) - } + cc.balancerWrapper.resolverError(err) // No addresses are valid with err set; return early. cc.mu.Unlock() @@ -614,24 +798,30 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var ret error - if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { + if cc.dopts.disableServiceConfig { + channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig(s.Addresses) + } else if s.ServiceConfig == nil { cc.maybeApplyDefaultServiceConfig(s.Addresses) // TODO: do we need to apply a failing LB policy if there is no // default, per the error handling design? 
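The new Connect method and the rewritten blocking-dial loop in DialContext follow the same pattern, which non-blocking callers can reproduce with the exported connectivity APIs alone. A sketch under that assumption (waitReady is a hypothetical helper, not part of this patch):

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// waitReady blocks until conn is READY or ctx expires, nudging an IDLE
// channel to connect, mirroring the blocking-dial loop above.
func waitReady(ctx context.Context, conn *grpc.ClientConn) error {
	for {
		s := conn.GetState()
		if s == connectivity.Ready {
			return nil
		}
		if s == connectivity.Idle {
			conn.Connect() // kicks the channel out of idle; no-op otherwise
		}
		if !conn.WaitForStateChange(ctx, s) {
			return ctx.Err() // ctx expired before the state changed
		}
	}
}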
} else { if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { - cc.applyServiceConfigAndBalancer(sc, s.Addresses) + configSelector := iresolver.GetConfigSelector(s) + if configSelector != nil { + if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { + channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector") + } + } else { + configSelector = &defaultConfigSelector{sc} + } + cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState - if cc.balancerWrapper == nil { - var err error - if s.ServiceConfig.Err != nil { - err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) - } else { - err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) - } - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) - cc.csMgr.updateState(connectivity.TransientFailure) + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. + cc.applyFailingLB(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -639,24 +829,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } - - cbn := cc.curBalancerName bw := cc.balancerWrapper cc.mu.Unlock() - if cbn != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { ret = uccsErr // prefer ErrBadResolver state since any other error is @@ -665,51 +843,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. -// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. +// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. // // Caller must hold cc.mu. 
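The branches above are driven by two public dial options. A sketch of the path where resolver-supplied service configs are ignored and maybeApplyDefaultServiceConfig falls back to a user-supplied default (the target and the round_robin config are illustrative):

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialWithDefaultConfig(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Takes the first branch of updateResolverState above: resolver
		// service configs are logged and ignored, the default applies.
		grpc.WithDisableServiceConfig(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"round_robin":{}}]}`))
}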
-func (cc *ClientConn) switchBalancer(name string) { - if strings.EqualFold(cc.curBalancerName, name) { - return - } - - channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") - return - } - if cc.balancerWrapper != nil { - cc.balancerWrapper.close() - } - - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) } else { - channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } - - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) } func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s, err) - cc.mu.Unlock() + cc.balancerWrapper.updateSubConnState(sc, s, err) } // newAddrConn creates an addrConn for addrs and adds it to cc.conns. @@ -724,27 +879,31 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub dopts: cc.dopts, czData: new(channelzData), resetBackoff: make(chan struct{}), + stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.mu.Lock() + defer cc.mu.Unlock() if cc.conns == nil { - cc.mu.Unlock() return nil, ErrClientConnClosing } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtINFO, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtINFO, - }, - }) - } + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err + } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + cc.conns[ac] = struct{}{} - cc.mu.Unlock() return ac, nil } @@ -773,7 +932,11 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { } // Target returns the target string of the ClientConn. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
func (cc *ClientConn) Target() string { return cc.target } @@ -797,67 +960,127 @@ func (cc *ClientConn) incrCallsFailed() { func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } ac.mu.Unlock() return nil } - // Update connectivity state within the lock to prevent subsequent or - // concurrent calls from resetting the transport more than once. - ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() - // Start a goroutine connecting to the server asynchronously. - go ac.resetTransport() + ac.resetTransport() return nil } -// tryUpdateAddrs tries to update ac.addrs with the new addresses list. -// -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// -// If ac is TransientFailure, it updates ac.addrs and returns true. The updated -// addresses will be picked up by retry in the next iteration after backoff. -// -// If ac is Shutdown or Idle, it updates ac.addrs and returns true. -// -// If ac is Ready, it checks whether current connected address of ac is in the -// new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. -func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + +// updateAddrs updates ac.addrs with the new addresses list and handles active +// connections or connection attempts. +func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - defer ac.mu.Unlock() - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + + if equalAddresses(ac.addrs, addrs) { + ac.mu.Unlock() + return + } + + ac.addrs = addrs + if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { - ac.addrs = addrs - return true + // We were not connecting, so do nothing but update the addresses. + ac.mu.Unlock() + return } - if ac.state == connectivity.Connecting { - return false + if ac.state == connectivity.Ready { + // Try to find the connected address. + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if a.Equal(ac.curAddr) { + // We are connected to a valid address, so do nothing but + // update the addresses. + ac.mu.Unlock() + return + } + } } - // ac.state is Ready, try to find the connected address. - var curAddrFound bool - for _, a := range addrs { - if reflect.DeepEqual(ac.curAddr, a) { - curAddrFound = true - break - } + // We are either connected to the wrong address or currently connecting. + // Stop the current iteration and restart. + + ac.cancel() + ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) + + // We have to defer here because GracefulClose => Close => onClose, which + // requires locking ac.mu. 
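getServerName above formalizes the authority precedence: an explicit WithAuthority override beats any resolver-supplied per-address ServerName, which in turn beats the channel's default authority. A sketch of the user-controlled end of that chain (host names are placeholders; creds is assumed to be TLS transport credentials):

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func dialWithAuthority(creds credentials.TransportCredentials) (*grpc.ClientConn, error) {
	return grpc.Dial("dns:///10.0.0.1:443",
		grpc.WithTransportCredentials(creds),
		// Highest precedence in getServerName: used as the handshake
		// serverName regardless of what the resolver reports per address.
		grpc.WithAuthority("api.example.com"))
}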
+ if ac.transport != nil { + defer ac.transport.GracefulClose() + ac.transport = nil } - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) - if curAddrFound { - ac.addrs = addrs + + if len(addrs) == 0 { + ac.updateConnectivityState(connectivity.Idle, nil) } - return curAddrFound + ac.mu.Unlock() + + // Since we were connecting/connected, we should start a new connection + // attempt. + go ac.resetTransport() +} + +// getServerName determines the serverName to be used in the connection +// handshake. The default value for the serverName is the authority on the +// ClientConn, which either comes from the user's dial target or through an +// authority override specified using the WithAuthority dial option. Name +// resolvers can specify a per-address override for the serverName through the +// resolver.Address.ServerName field which is used only if the WithAuthority +// dial option was not used. The rationale is that per-address authority +// overrides specified by the name resolver can represent a security risk, while +// an override specified by the user is more dependable since they probably know +// what they are doing. +func (cc *ClientConn) getServerName(addr resolver.Address) string { + if cc.dopts.authority != "" { + return cc.dopts.authority + } + if addr.ServerName != "" { + return addr.ServerName + } + return cc.authority +} + +func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { + if sc == nil { + return MethodConfig{} + } + if m, ok := sc.Methods[method]; ok { + return m + } + i := strings.LastIndex(method, "/") + if m, ok := sc.Methods[method[:i+1]]; ok { + return m + } + return sc.Methods[""] } // GetMethodConfig gets the method config of the input method. @@ -872,17 +1095,7 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { // TODO: Avoid the locking here. cc.mu.RLock() defer cc.mu.RUnlock() - if cc.sc == nil { - return MethodConfig{} - } - if m, ok := cc.sc.Methods[method]; ok { - return m - } - i := strings.LastIndex(method, "/") - if m, ok := cc.sc.Methods[method[:i+1]]; ok { - return m - } - return cc.sc.Methods[""] + return getMethodConfig(cc.sc, method) } func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { @@ -894,23 +1107,22 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { + return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) - if err != nil { - return nil, nil, toRPCErr(err) - } - return t, done, nil } -func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) { +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { if sc == nil { // should never reach here. 
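The extracted getMethodConfig helper encodes the service config name-matching order: an exact "/service/method" entry first, then the "/service/" prefix entry, then the empty-name default (a zero MethodConfig if none is present). An illustrative config exercising the first two levels (service and method names are hypothetical):

const echoServiceConfig = `{
  "methodConfig": [
    {
      "name": [{"service": "echo.Echo", "method": "UnaryEcho"}],
      "timeout": "1s"
    },
    {
      "name": [{"service": "echo.Echo"}],
      "waitForReady": true
    }
  ]
}`

// "/echo.Echo/UnaryEcho" matches the first entry exactly; any other
// echo.Echo method falls through to the service-wide second entry.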
return } cc.sc = sc + if configSelector != nil { + cc.safeConfigSelector.UpdateConfigSelector(configSelector) + } if cc.sc.retryThrottling != nil { newThrottler := &retryThrottler{ @@ -924,35 +1136,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []r cc.retryThrottler.Store((*retryThrottler)(nil)) } - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. - var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } } + cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -974,7 +1177,10 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { // However, if a previously unavailable network becomes available, this may be // used to trigger an immediate reconnect. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func (cc *ClientConn) ResetConnectBackoff() { cc.mu.Lock() conns := cc.conns @@ -993,44 +1199,45 @@ func (cc *ClientConn) Close() error { cc.mu.Unlock() return ErrClientConnClosing } + + for cc.idlenessState == ccIdlenessStateExitingIdle { + cc.exitIdleCond.Wait() + } + conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) + pWrapper := cc.blockingpicker rWrapper := cc.resolverWrapper - cc.resolverWrapper = nil bWrapper := cc.balancerWrapper - cc.balancerWrapper = nil + idlenessMgr := cc.idlenessMgr cc.mu.Unlock() - cc.blockingpicker.close() - - if rWrapper != nil { - rWrapper.close() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. 
+ if pWrapper != nil { + pWrapper.close() } if bWrapper != nil { bWrapper.close() } + if rWrapper != nil { + rWrapper.close() + } + if idlenessMgr != nil { + idlenessMgr.close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", - Severity: channelz.CtINFO, - } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtINFO, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) - } + cc.addTraceEvent("deleted") + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + return nil } @@ -1055,12 +1262,13 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State + state connectivity.State + stateChan chan struct{} // closed and recreated on every state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. + channelzID *channelz.Identifier czData *channelzData } @@ -1069,8 +1277,15 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } + // When changing states, reset the state change channel. + close(ac.stateChan) + ac.stateChan = make(chan struct{}) ac.state = s - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + if lastErr == nil { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + } else { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) + } ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } @@ -1089,113 +1304,86 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } func (ac *addrConn) resetTransport() { - for i := 0; ; i++ { - if i > 0 { - ac.cc.resolveNow(resolver.ResolveNowOptions{}) - } + ac.mu.Lock() + acCtx := ac.ctx + if acCtx.Err() != nil { + ac.mu.Unlock() + return + } - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. + dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } - addrs := ac.addrs - backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) - // This will be the duration that dial gets to finish. - dialDuration := minConnectTimeout - if ac.dopts.minConnectTimeout != nil { - dialDuration = ac.dopts.minConnectTimeout() - } + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. 
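The backoffFor computed at the top of the rewritten resetTransport comes from ac.dopts.bs, which is backoff.DefaultExponential unless overridden, and dialDuration stretches each attempt's deadline to at least that interval. Both knobs are tuned through one dial option; the values below are the library defaults, shown purely for illustration:

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials"
)

func dialWithBackoff(target string, creds credentials.TransportCredentials) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithTransportCredentials(creds),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff: backoff.Config{ // library defaults
				BaseDelay:  1.0 * time.Second,
				Multiplier: 1.6,
				Jitter:     0.2,
				MaxDelay:   120 * time.Second,
			},
			// Floor for each dial attempt; the dialDuration above never
			// drops below this value.
			MinConnectTimeout: 20 * time.Second,
		}))
}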
+ // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) - if dialDuration < backoffFor { - // Give dial more time as we keep failing to connect. - dialDuration = backoffFor + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + + if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. + if acCtx.Err() != nil { + return } - // We can potentially spend all the time trying the first address, and - // if the server accepts the connection and then hangs, the following - // addresses will never be tried. - // - // The spec doesn't mention what should be done for multiple addresses. - // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm - connectDeadline := time.Now().Add(dialDuration) + ac.mu.Lock() + ac.updateConnectivityState(connectivity.TransientFailure, err) - ac.updateConnectivityState(connectivity.Connecting, nil) - ac.transport = nil + // Backoff. + b := ac.resetBackoff ac.mu.Unlock() - newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) - if err != nil { - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - ac.updateConnectivityState(connectivity.TransientFailure, err) - - // Backoff. - b := ac.resetBackoff + ac.backoffIdx++ ac.mu.Unlock() - - timer := time.NewTimer(backoffFor) - select { - case <-timer.C: - ac.mu.Lock() - ac.backoffIdx++ - ac.mu.Unlock() - case <-b: - timer.Stop() - case <-ac.ctx.Done(): - timer.Stop() - return - } - continue + case <-b: + timer.Stop() + case <-acCtx.Done(): + timer.Stop() + return } ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - newTr.Close() - return + if acCtx.Err() == nil { + ac.updateConnectivityState(connectivity.Idle, err) } - ac.curAddr = addr - ac.transport = newTr - ac.backoffIdx = 0 - - hctx, hcancel := context.WithCancel(ac.ctx) - ac.startHealthCheck(hctx) ac.mu.Unlock() - - // Block until the created transport is down. And when this happens, - // we restart from the top of the addr list. - <-reconnect.Done() - hcancel() - // restart connecting - the top of the loop will set state to - // CONNECTING. This is against the current connectivity semantics doc, - // however it allows for graceful behavior for RPCs not yet dispatched - // - unfortunate timing would otherwise lead to the RPC failing even - // though the TRANSIENT_FAILURE state (called for by the doc) would be - // instantaneous. - // - // Ideally we should transition to Idle here and block until there is - // RPC activity that leads to the balancer requesting a reconnect of - // the associated SubConn. + return } + // Success; reset backoff. + ac.mu.Lock() + ac.backoffIdx = 0 + ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at the -// first successful one. It returns the transport, the address and a Event in -// the successful case. The Event fires when the returned transport disconnects. 
-func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { +// tryAllAddrs tries to create a connection to each of the addresses, stopping +// at the first successful one. It returns an error if no address was successfully +// connected, or updates ac appropriately with the new transport. +func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return nil, resolver.Address{}, nil, errConnClosing + if ctx.Err() != nil { + return errConnClosing } + ac.mu.Lock() ac.cc.mu.RLock() ac.dopts.copts.KeepaliveParams = ac.cc.mkp @@ -1209,9 +1397,9 @@ channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) + err := ac.createTransport(ctx, addr, copts, connectDeadline) if err == nil { - return newTr, addr, reconnect, nil + return nil } if firstConnErr == nil { firstConnErr = err @@ -1220,86 +1408,90 @@ } // Couldn't connect to any address. - return nil, resolver.Address{}, nil, firstConnErr + return firstConnErr } -// createTransport creates a connection to addr. It returns the transport and a -// Event in the successful case. The Event fires when the returned transport -// disconnects. -func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { - prefaceReceived := make(chan struct{}) - onCloseCalled := make(chan struct{}) - reconnect := grpcsync.NewEvent() +// createTransport creates a connection to addr. It returns an error if the +// address was not successfully connected, or updates ac appropriately with the +// new transport. +func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { + addr.ServerName = ac.cc.getServerName(addr) + hctx, hcancel := context.WithCancel(ctx) - // addr.ServerName takes precedent over ClientConn authority, if present. - if addr.ServerName == "" { - addr.ServerName = ac.cc.authority - } - - once := sync.Once{} - onGoAway := func(r transport.GoAwayReason) { + onClose := func(r transport.GoAwayReason) { ac.mu.Lock() + defer ac.mu.Unlock() + // adjust params based on GoAwayReason ac.adjustParams(r) - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) - ac.mu.Unlock() - reconnect.Fire() - } - - onClose := func() { - ac.mu.Lock() - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it.
- ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) - ac.mu.Unlock() - close(onCloseCalled) - reconnect.Fire() - } - - onPrefaceReceipt := func() { - close(prefaceReceived) + if ctx.Err() != nil { + // Already shut down or connection attempt canceled. tearDown() or + // updateAddrs() already cleared the transport and canceled hctx + // via ac.ctx, and we expected this connection to be closed, so do + // nothing here. + return + } + hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. + return + } + ac.transport = nil + // Refresh the name resolver on any connection loss. + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. + ac.updateConnectivityState(connectivity.Idle, nil) } - connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } + copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) - return nil, nil, err + hcancel() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) + return err } - select { - case <-time.After(time.Until(connectDeadline)): - // We didn't get the preface in time. - newTr.Close() - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) - return nil, nil, errors.New("timed out waiting for server handshake") - case <-prefaceReceived: - // We got the preface - huzzah! things are good. - case <-onCloseCalled: - // The transport has already closed - noop. - return nil, nil, errors.New("connection closed") - // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + ac.mu.Lock() + defer ac.mu.Unlock() + if ctx.Err() != nil { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + // + // This can also happen when updateAddrs is called during a connection + // attempt. + go newTr.Close(transport.ErrConnClosing) + return nil + } + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. Consider it a success and set + // the new state to Idle. 
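The `connectCtx` in the hunk above bounds one transport handshake with a deadline that is shared across every address tried in a single pass. The same pattern works with the standard library alone; here is a hedged sketch outside gRPC, where the address and timeout are placeholders.

    package main

    import (
    	"context"
    	"fmt"
    	"net"
    	"time"
    )

    // dialWithDeadline bounds a single connection attempt by connectDeadline,
    // in the spirit of connectCtx in createTransport above.
    func dialWithDeadline(ctx context.Context, addr string, connectDeadline time.Time) (net.Conn, error) {
    	connectCtx, cancel := context.WithDeadline(ctx, connectDeadline)
    	defer cancel() // release the deadline timer once the attempt resolves

    	var d net.Dialer
    	conn, err := d.DialContext(connectCtx, "tcp", addr)
    	if err != nil {
    		return nil, fmt.Errorf("dial %s: %w", addr, err)
    	}
    	return conn, nil
    }

    func main() {
    	// One deadline governs the whole attempt, however long DNS or the
    	// TCP handshake individually take.
    	deadline := time.Now().Add(2 * time.Second)
    	conn, err := dialWithDeadline(context.Background(), "example.com:443", deadline)
    	if err != nil {
    		fmt.Println("connect failed:", err)
    		return
    	}
    	defer conn.Close()
    	fmt.Println("connected to", conn.RemoteAddr())
    }

Because the deadline is absolute rather than a per-address timeout, a server that accepts the connection and then hangs cannot consume more than its share of the pass, which is exactly the concern the removed comment in the earlier hunk described.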
+ ac.updateConnectivityState(connectivity.Idle, nil) + return nil } - return newTr, reconnect, nil + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil } // startHealthCheck starts the health checking stream (RPC) to watch the health @@ -1369,7 +1561,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) } } }() @@ -1383,33 +1575,43 @@ func (ac *addrConn) resetConnectBackoff() { ac.mu.Unlock() } -// getReadyTransport returns the transport if ac's state is READY. -// Otherwise it returns nil, false. -// If ac's state is IDLE, it will trigger ac to connect. -func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { +// getReadyTransport returns the transport if ac's state is READY or nil if not. +func (ac *addrConn) getReadyTransport() transport.ClientTransport { ac.mu.Lock() - if ac.state == connectivity.Ready && ac.transport != nil { - t := ac.transport - ac.mu.Unlock() - return t, true - } - var idle bool - if ac.state == connectivity.Idle { - idle = true + defer ac.mu.Unlock() + if ac.state == connectivity.Ready { + return ac.transport } - ac.mu.Unlock() - // Trigger idle ac to connect. - if idle { - ac.connect() + return nil +} + +// getTransport waits until the addrconn is ready and returns the transport. +// If the context expires first, returns an appropriate status. If the +// addrConn is stopped first, returns an Unavailable status error. +func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { + for ctx.Err() == nil { + ac.mu.Lock() + t, state, sc := ac.transport, ac.state, ac.stateChan + ac.mu.Unlock() + if state == connectivity.Ready { + return t, nil + } + if state == connectivity.Shutdown { + return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") + } + + select { + case <-ctx.Done(): + case <-sc: + } } - return nil, false + return nil, status.FromContextError(ctx.Err()).Err() } // tearDown starts to tear down the addrConn. -// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in -// some edge cases (e.g., the caller opens and closes many addrConn's in a -// tight loop. -// tearDown doesn't remove ac from ac.cc.conns. +// +// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +// will leak. In most cases, call cc.removeAddrConn() instead. func (ac *addrConn) tearDown(err error) { ac.mu.Lock() if ac.state == connectivity.Shutdown { @@ -1433,19 +1635,18 @@ func (ac *addrConn) tearDown(err error) { curTr.GracefulClose() ac.mu.Lock() } - if channelz.IsOn() { - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", - Severity: channelz.CtINFO, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtINFO, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. 
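The new getTransport above waits on ac.stateChan, a channel that is closed and replaced on every state transition. Close-and-replace is a common Go broadcast idiom: closing wakes every waiter at once, and installing a fresh channel rearms the signal. A minimal sketch under invented names, not the grpc-go types:

    package main

    import (
    	"context"
    	"fmt"
    	"sync"
    	"time"
    )

    type stateTracker struct {
    	mu        sync.Mutex
    	state     string
    	stateChan chan struct{} // closed on every state change
    }

    func newStateTracker() *stateTracker {
    	return &stateTracker{state: "CONNECTING", stateChan: make(chan struct{})}
    }

    func (t *stateTracker) set(s string) {
    	t.mu.Lock()
    	defer t.mu.Unlock()
    	t.state = s
    	close(t.stateChan)                // wake everyone waiting on the old channel
    	t.stateChan = make(chan struct{}) // future waiters block on a fresh one
    }

    // waitReady blocks until the state is READY or ctx expires, mirroring
    // the getTransport loop above.
    func (t *stateTracker) waitReady(ctx context.Context) error {
    	for ctx.Err() == nil {
    		t.mu.Lock()
    		state, sc := t.state, t.stateChan
    		t.mu.Unlock()
    		if state == "READY" {
    			return nil
    		}
    		select {
    		case <-ctx.Done():
    		case <-sc:
    		}
    	}
    	return ctx.Err()
    }

    func main() {
    	t := newStateTracker()
    	go func() { time.Sleep(50 * time.Millisecond); t.set("READY") }()
    	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    	defer cancel()
    	fmt.Println("waitReady:", t.waitReady(ctx))
    }

Snapshotting both the state and the channel under the lock, then selecting outside it, is what keeps the loop race-free: a transition that lands between the snapshot and the select still closes the snapshotted channel.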
- channelz.RemoveEntry(ac.channelzID) - } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() } @@ -1534,6 +1735,9 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { // referenced by users. var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") +// getResolver finds the scheme in the cc's resolvers or the global registry. +// scheme should always be lowercase (typically by virtue of url.Parse() +// performing proper RFC3986 behavior). func (cc *ClientConn) getResolver(scheme string) resolver.Builder { for _, rb := range cc.dopts.resolvers { if scheme == rb.Scheme() { @@ -1554,3 +1758,151 @@ func (cc *ClientConn) connectionError() error { defer cc.lceMu.Unlock() return cc.lastConnectionError } + +// parseTargetAndFindResolver parses the user's dial target and stores the +// parsed target in `cc.parsedTarget`. +// +// The resolver to use is determined based on the scheme in the parsed target +// and the same is stored in `cc.resolverBuilder`. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) parseTargetAndFindResolver() error { + channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) + + var rb resolver.Builder + parsedTarget, err := parseTarget(cc.target) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) + } else { + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb != nil { + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil + } + } + + // We are here because the user's dial target did not contain a scheme or + // specified an unregistered scheme. We should fallback to the default + // scheme, except when a custom dialer is specified in which case, we should + // always use passthrough scheme. + defScheme := resolver.GetDefaultScheme() + channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + canonicalTarget := defScheme + ":///" + cc.target + + parsedTarget, err = parseTarget(canonicalTarget) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) + return err + } + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb == nil { + return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + } + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing scheme, authority and url. Query +// params are stripped from the endpoint. +func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + + return resolver.Target{ + Scheme: u.Scheme, + Authority: u.Host, + URL: *u, + }, nil +} + +// Determine channel authority. 
The order of precedence is as follows: +// - user specified authority override using `WithAuthority` dial option +// - creds' notion of server name for the authentication handshake +// - endpoint from dial target of the form "scheme://[authority]/endpoint" +// +// Stores the determined authority in `cc.authority`. +// +// Returns a non-nil error if the authority returned by the transport +// credentials does not match the authority configured through the dial option. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) determineAuthority() error { + dopts := cc.dopts + // Historically, we had two options for users to specify the serverName or + // authority for a channel. One was through the transport credentials + // (either in its constructor, or through the OverrideServerName() method). + // The other option (for cases where WithInsecure() dial option was used) + // was to use the WithAuthority() dial option. + // + // A few things have changed since: + // - `insecure` package with an implementation of the `TransportCredentials` + // interface for the insecure case + // - WithAuthority() dial option support for secure credentials + authorityFromCreds := "" + if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { + authorityFromCreds = creds.Info().ServerName + } + authorityFromDialOption := dopts.authority + if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { + return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + } + + endpoint := cc.parsedTarget.Endpoint() + target := cc.target + switch { + case authorityFromDialOption != "": + cc.authority = authorityFromDialOption + case authorityFromCreds != "": + cc.authority = authorityFromCreds + case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): + // TODO: remove when the unix resolver implements optional interface to + // return channel authority. + cc.authority = "localhost" + case strings.HasPrefix(endpoint, ":"): + cc.authority = "localhost" + endpoint + default: + // TODO: Define an optional interface on the resolver builder to return + // the channel authority given the user's dial target. For resolvers + // which don't implement this interface, we will use the endpoint from + // "scheme://authority/endpoint" as the default authority. + cc.authority = endpoint + } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + return nil +} + +// initResolverWrapper creates a ccResolverWrapper, which builds the name +// resolver. This method grabs the lock to assign the newly built resolver +// wrapper to the cc.resolverWrapper field. +func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { + rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ + target: cc.parsedTarget, + builder: cc.resolverBuilder, + bOpts: resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: creds, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + }, + channelzID: cc.channelzID, + }) + if err != nil { + return fmt.Errorf("failed to build resolver: %v", err) + } + // Resolver implementations may report state update or error inline when + // built (or right after), and this is handled in cc.updateResolverState.
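The initResolverWrapper comment resumes just below; first, a hedged sketch of the parse-then-fallback flow that parseTargetAndFindResolver above performs with RFC 3986 semantics via net/url. Treating "dns" as the default scheme and the scheme set as fixed are assumptions for illustration; grpc-go consults its resolver registry instead.

    package main

    import (
    	"fmt"
    	"net/url"
    	"strings"
    )

    var knownSchemes = map[string]bool{"dns": true, "unix": true, "passthrough": true}

    // parseWithFallback parses target as a URL; if the scheme is missing or
    // unregistered, it canonicalizes with the assumed default scheme.
    func parseWithFallback(target string) (*url.URL, error) {
    	if u, err := url.Parse(target); err == nil && knownSchemes[u.Scheme] {
    		return u, nil
    	}
    	return url.Parse("dns:///" + target)
    }

    func main() {
    	for _, t := range []string{"dns:///example.com:443", "localhost:50051"} {
    		u, err := parseWithFallback(t)
    		if err != nil {
    			fmt.Println(t, "=>", err)
    			continue
    		}
    		// The endpoint is the path with its leading slash trimmed,
    		// matching the "scheme://[authority]/endpoint" layout above.
    		endpoint := strings.TrimPrefix(u.Path, "/")
    		fmt.Printf("%s => scheme=%s authority=%s endpoint=%s\n", t, u.Scheme, u.Host, endpoint)
    	}
    }

Note that url.Parse("localhost:50051") succeeds with scheme "localhost", which is why a registry lookup, not a parse error, has to drive the fallback, and why determineAuthority above can rely on the endpoint having been cleanly separated from the authority.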
+ // Also, an error from the resolver might lead to a re-resolution request + // from the balancer, which is handled in resolveNow() where + // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. + cc.mu.Lock() + cc.resolverWrapper = rw + cc.mu.Unlock() + return nil +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/codes/code_string.go b/prometheus-to-sd/vendor/google.golang.org/grpc/codes/code_string.go index 0b206a578..934fac2b0 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/codes/code_string.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/codes/code_string.go @@ -18,7 +18,15 @@ package codes -import "strconv" +import ( + "strconv" + + "google.golang.org/grpc/internal" +) + +func init() { + internal.CanonicalString = canonicalString +} func (c Code) String() string { switch c { @@ -60,3 +68,44 @@ func (c Code) String() string { return "Code(" + strconv.FormatInt(int64(c), 10) + ")" } } + +func canonicalString(c Code) string { + switch c { + case OK: + return "OK" + case Canceled: + return "CANCELLED" + case Unknown: + return "UNKNOWN" + case InvalidArgument: + return "INVALID_ARGUMENT" + case DeadlineExceeded: + return "DEADLINE_EXCEEDED" + case NotFound: + return "NOT_FOUND" + case AlreadyExists: + return "ALREADY_EXISTS" + case PermissionDenied: + return "PERMISSION_DENIED" + case ResourceExhausted: + return "RESOURCE_EXHAUSTED" + case FailedPrecondition: + return "FAILED_PRECONDITION" + case Aborted: + return "ABORTED" + case OutOfRange: + return "OUT_OF_RANGE" + case Unimplemented: + return "UNIMPLEMENTED" + case Internal: + return "INTERNAL" + case Unavailable: + return "UNAVAILABLE" + case DataLoss: + return "DATA_LOSS" + case Unauthenticated: + return "UNAUTHENTICATED" + default: + return "CODE(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/connectivity/connectivity.go b/prometheus-to-sd/vendor/google.golang.org/grpc/connectivity/connectivity.go index 34ec36fbf..4a8992642 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/connectivity/connectivity.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -18,15 +18,14 @@ // Package connectivity defines connectivity semantics. // For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. -// All APIs in this package are experimental. package connectivity import ( - "context" - "google.golang.org/grpc/grpclog" ) +var logger = grpclog.Component("core") + // State indicates the state of connectivity. // It can be the state of a ClientConn or SubConn. type State int @@ -44,8 +43,8 @@ func (s State) String() string { case Shutdown: return "SHUTDOWN" default: - grpclog.Errorf("unknown connectivity state: %d", s) - return "Invalid-State" + logger.Errorf("unknown connectivity state: %d", s) + return "INVALID_STATE" } } @@ -62,12 +61,34 @@ const ( Shutdown ) -// Reporter reports the connectivity states. -type Reporter interface { - // CurrentState returns the current state of the reporter. - CurrentState() State - // WaitForStateChange blocks until the reporter's state is different from the given state, - // and returns true. - // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). - WaitForStateChange(context.Context, State) bool +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. 
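The ServingMode type declared just below belongs to the connectivity hunk; stepping back to the codes hunk above, its new init() assigns canonicalString to internal.CanonicalString. grpc-go uses such init-time hook variables so the internal package can expose behavior from packages it cannot import without creating a cycle. A toy single-file sketch of the pattern, with invented names:

    package main

    import "fmt"

    // canonicalStringHook plays the role of a variable declared in a shared
    // low-level package (grpc/internal in the real tree): packages above it
    // populate the hook at init time, so the low-level package never has to
    // import them.
    var canonicalStringHook func(code uint32) string

    // This init stands in for the codes package's init above, which performs
    // internal.CanonicalString = canonicalString.
    func init() {
    	canonicalStringHook = func(code uint32) string {
    		if code == 0 {
    			return "OK"
    		}
    		return fmt.Sprintf("CODE(%d)", code)
    	}
    }

    func main() {
    	// Callers depend only on the hook variable, not on the package
    	// that implements it.
    	fmt.Println(canonicalStringHook(0), canonicalStringHook(13))
    }

The cost of the pattern is that the hook is nil until the implementing package's init runs, which is why the real code only sets it from init() and never reads it during package initialization.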
+type ServingMode int + +const ( + // ServingModeStarting indicates that the server is starting up. + ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required configuration to serve RPCs. + ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeStarting: + return "STARTING" + case ServingModeServing: + return "SERVING" + case ServingModeNotServing: + return "NOT_SERVING" + default: + logger.Errorf("unknown serving mode: %d", s) + return "INVALID_MODE" + } } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/credentials.go b/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/credentials.go index 02766443a..5feac3aa0 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/credentials.go @@ -30,22 +30,22 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc/attributes" - "google.golang.org/grpc/internal" + icredentials "google.golang.org/grpc/internal/credentials" ) // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). type PerRPCCredentials interface { - // GetRequestMetadata gets the current request metadata, refreshing - // tokens if required. This should be called by the transport layer on - // each request, and the data should be populated in headers or other - // context. If a status code is returned, it will be used as the status - // for the RPC. uri is the URI of the entry point for the request. - // When supported by the underlying implementation, ctx can be used for - // timeout and cancellation. Additionally, RequestInfo data will be - // available via ctx to this call. - // TODO(zhaoq): Define the set of the qualified keys instead of leaving - // it as an arbitrary string. + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. @@ -58,9 +58,9 @@ type PerRPCCredentials interface { type SecurityLevel int const ( - // Invalid indicates an invalid security level. + // InvalidSecurityLevel indicates an invalid security level. // The zero SecurityLevel value is invalid for backward compatibility. - Invalid SecurityLevel = iota + InvalidSecurityLevel SecurityLevel = iota // NoSecurity indicates a connection is insecure. 
NoSecurity // IntegrityOnly indicates a connection only provides integrity protection. @@ -92,7 +92,7 @@ type CommonAuthInfo struct { } // GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. -func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo { +func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { return c } @@ -140,6 +140,11 @@ type TransportCredentials interface { // Additionally, ClientHandshakeInfo data will be available via the context // passed to this call. // + // The second argument to this method is the `:authority` header value used + // while creating new streams on this connection after authentication + // succeeds. Implementations must use this as the server name during the + // authentication handshake. + // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns @@ -153,9 +158,13 @@ type TransportCredentials interface { Info() ProtocolInfo // Clone makes a copy of this TransportCredentials. Clone() TransportCredentials - // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. - // gRPC internals also use it to override the virtual hosting name if it is set. - // It must be called before dialing. Currently, this is only used by grpclb. + // OverrideServerName specifies the value used for the following: + // - verifying the hostname on the returned certificates + // - as SNI in the client's handshake to support virtual hosting + // - as the value for `:authority` header at stream creation time + // + // Deprecated: use grpc.WithAuthority instead. Will be supported + // throughout 1.x. OverrideServerName(string) error } @@ -169,8 +178,18 @@ type TransportCredentials interface { // // This API is experimental. type Bundle interface { + // TransportCredentials returns the transport credentials from the Bundle. + // + // Implementations must return non-nil transport credentials. If transport + // security is not needed by the Bundle, implementations may choose to + // return insecure.NewCredentials(). TransportCredentials() TransportCredentials + + // PerRPCCredentials returns the per-RPC credentials from the Bundle. + // + // May be nil if per-RPC credentials are not needed. PerRPCCredentials() PerRPCCredentials + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the // existing Bundle may cause races. // @@ -188,15 +207,12 @@ type RequestInfo struct { AuthInfo AuthInfo } -// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object. -type requestInfoKey struct{} - // RequestInfoFromContext extracts the RequestInfo from the context if it exists. // // This API is experimental. func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { - ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo) - return + ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + return ri, ok } // ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes @@ -211,16 +227,12 @@ type ClientHandshakeInfo struct { Attributes *attributes.Attributes } -// clientHandshakeInfoKey is a struct used as the key to store -// ClientHandshakeInfo in a context. -type clientHandshakeInfoKey struct{} - // ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored // in ctx. // // This API is experimental. 
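ClientHandshakeInfoFromContext follows below; like RequestInfoFromContext above, it now delegates to internal/credentials, but the mechanism is unchanged: values ride the context under unexported struct keys, so no other package can collide with or forge them. A small sketch of the pattern with invented names (the real keys and constructors live in google.golang.org/grpc/internal/credentials):

    package main

    import (
    	"context"
    	"fmt"
    )

    type requestInfo struct{ Method string }

    // An unexported struct type is the idiomatic context key: zero-sized,
    // comparable, and impossible to reproduce outside this package.
    type requestInfoKey struct{}

    func newRequestInfoContext(ctx context.Context, ri requestInfo) context.Context {
    	return context.WithValue(ctx, requestInfoKey{}, ri)
    }

    func requestInfoFromContext(ctx context.Context) (requestInfo, bool) {
    	ri, ok := ctx.Value(requestInfoKey{}).(requestInfo)
    	return ri, ok
    }

    func main() {
    	ctx := newRequestInfoContext(context.Background(), requestInfo{Method: "/pkg.Svc/Do"})
    	ri, ok := requestInfoFromContext(ctx)
    	fmt.Println(ri.Method, ok)
    }

Moving the key into an internal package, as this patch does, keeps the put-side out of the public API entirely: external code can read the value through the exported accessor but can never inject one.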
func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { - chi, _ := ctx.Value(clientHandshakeInfoKey{}).(ClientHandshakeInfo) + chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo) return chi } @@ -229,17 +241,16 @@ func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { // or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. // // This API is experimental. -func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error { +func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { type internalInfo interface { - GetCommonAuthInfo() *CommonAuthInfo + GetCommonAuthInfo() CommonAuthInfo } - ri, _ := RequestInfoFromContext(ctx) - if ri.AuthInfo == nil { - return errors.New("unable to obtain SecurityLevel from context") + if ai == nil { + return errors.New("AuthInfo is nil") } - if ci, ok := ri.AuthInfo.(internalInfo); ok { + if ci, ok := ai.(internalInfo); ok { // CommonAuthInfo.SecurityLevel has an invalid value. - if ci.GetCommonAuthInfo().SecurityLevel == Invalid { + if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel { return nil } if ci.GetCommonAuthInfo().SecurityLevel < level { @@ -250,15 +261,6 @@ func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error { return nil } -func init() { - internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { - return context.WithValue(ctx, requestInfoKey{}, ri) - } - internal.NewClientHandshakeInfoContext = func(ctx context.Context, chi ClientHandshakeInfo) context.Context { - return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) - } -} - // ChannelzSecurityInfo defines the interface that security protocols should implement // in order to provide security info to channelz. // diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/go12.go b/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/go12.go deleted file mode 100644 index ccbf35b33..000000000 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/go12.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build go1.12 - -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package credentials - -import "crypto/tls" - -// This init function adds cipher suite constants only defined in Go 1.12. 
-func init() { - cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" - cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" - cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" -} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/insecure/insecure.go new file mode 100644 index 000000000..82bee1443 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package insecure provides an implementation of the +// credentials.TransportCredentials interface which disables transport security. +package insecure + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// NewCredentials returns a credentials which disables transport security. +// +// Note that using this credentials with per-RPC credentials which require +// transport security is incompatible and will cause grpc.Dial() to fail. +func NewCredentials() credentials.TransportCredentials { + return insecureTC{} +} + +// insecureTC implements the insecure transport credentials. The handshake +// methods simply return the passed in net.Conn and set the security level to +// NoSecurity. +type insecureTC struct{} + +func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{SecurityProtocol: "insecure"} +} + +func (insecureTC) Clone() credentials.TransportCredentials { + return insecureTC{} +} + +func (insecureTC) OverrideServerName(string) error { + return nil +} + +// info contains the auth information for an insecure connection. +// It implements the AuthInfo interface. +type info struct { + credentials.CommonAuthInfo +} + +// AuthType returns the type of info as a string. +func (info) AuthType() string { + return "insecure" +} + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. 
+func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns a nil implementation, as the insecure +// bundle does not support per-RPC credentials. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. +func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/tls.go b/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/tls.go index 1ba6f3a6b..877b7cd21 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/tls.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/tls.go @@ -23,11 +23,10 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "net" "net/url" + "os" - "google.golang.org/grpc/credentials/internal" credinternal "google.golang.org/grpc/internal/credentials" ) @@ -73,7 +72,7 @@ func (c tlsCreds) Info() ProtocolInfo { func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints - cfg := cloneTLSConfig(c.config) + cfg := credinternal.CloneTLSConfig(c.config) if cfg.ServerName == "" { serverName, _, err := net.SplitHostPort(authority) if err != nil { @@ -108,7 +107,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon if id != nil { tlsInfo.SPIFFEID = id } - return internal.WrapSyscallConn(rawConn, conn), tlsInfo, nil + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil } func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { @@ -127,7 +126,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) if id != nil { tlsInfo.SPIFFEID = id } - return internal.WrapSyscallConn(rawConn, conn), tlsInfo, nil + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil } func (c *tlsCreds) Clone() TransportCredentials { @@ -139,23 +138,10 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } -const alpnProtoStrH2 = "h2" - -func appendH2ToNextProtos(ps []string) []string { - for _, p := range ps { - if p == alpnProtoStrH2 { - return ps - } - } - ret := make([]string, 0, len(ps)+1) - ret = append(ret, ps...) - return append(ret, alpnProtoStrH2) -} - // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{cloneTLSConfig(c)} - tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) + tc := &tlsCreds{credinternal.CloneTLSConfig(c)} + tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) return tc } @@ -180,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // it will override the virtual host name of authority (e.g. :authority header // field) in requests.
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { - b, err := ioutil.ReadFile(certFile) + b, err := os.ReadFile(certFile) if err != nil { return nil, err } @@ -209,7 +195,10 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error // TLSChannelzSecurityValue defines the struct that TLS protocol should return // from GetSecurityValue(), containing security info like cipher and certificate used. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type TLSChannelzSecurityValue struct { ChannelzSecurityValue StandardName string @@ -241,19 +230,7 @@ var cipherSuiteLookup = map[uint16]string{ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", -} - -// cloneTLSConfig returns a shallow clone of the exported -// fields of cfg, ignoring the unexported sync.Once, which -// contains a mutex and must not be copied. -// -// If cfg is nil, a new zero tls.Config is returned. -// -// TODO: inline this function if possible. -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - - return cfg.Clone() + tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", + tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", + tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/dialoptions.go b/prometheus-to-sd/vendor/google.golang.org/grpc/dialoptions.go index decb4c5ee..15a3d5102 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/dialoptions.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/dialoptions.go @@ -20,22 +20,34 @@ package grpc import ( "context" - "fmt" "net" "time" "google.golang.org/grpc/backoff" - "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) +func init() { + internal.AddGlobalDialOptions = func(opt ...DialOption) { + globalDialOptions = append(globalDialOptions, opt...) + } + internal.ClearGlobalDialOptions = func() { + globalDialOptions = nil + } + internal.WithBinaryLogger = withBinaryLogger + internal.JoinDialOptions = newJoinDialOption + internal.DisableGlobalDialOptions = newDisableGlobalDialOptions +} + // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. type dialOptions struct { @@ -45,20 +57,18 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - returnLastError bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by WithBalancerName dial option. 
- balancerBuilder balancer.Builder - channelzParentID int64 + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + binaryLogger binarylog.Logger + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -66,12 +76,8 @@ type dialOptions struct { minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string - // This is used by ccResolverWrapper to backoff between successive calls to - // resolver.ResolveNow(). The user will have no need to configure this, but - // we need to be able to configure this in tests. - resolveNowBackoff func(int) time.Duration - resolvers []resolver.Builder - withProxy bool + resolvers []resolver.Builder + idleTimeout time.Duration } // DialOption configures how we set up the connection. @@ -79,14 +85,29 @@ type DialOption interface { apply(*dialOptions) } +var globalDialOptions []DialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type EmptyDialOption struct{} func (EmptyDialOption) apply(*dialOptions) {} +type disableGlobalDialOptions struct{} + +func (disableGlobalDialOptions) apply(*dialOptions) {} + +// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn +// from applying the global DialOptions (set via AddGlobalDialOptions). +func newDisableGlobalDialOptions() DialOption { + return &disableGlobalDialOptions{} +} + // funcDialOption wraps a function that modifies dialOptions into an // implementation of the DialOption interface. type funcDialOption struct { @@ -103,13 +124,28 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { } } +type joinDialOption struct { + opts []DialOption +} + +func (jdo *joinDialOption) apply(do *dialOptions) { + for _, opt := range jdo.opts { + opt.apply(do) + } +} + +func newJoinDialOption(opts ...DialOption) DialOption { + return &joinDialOption{opts: opts} +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is // 32KB. // -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. Note: A Send call may not directly +// translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s @@ -119,8 +155,9 @@ func WithWriteBufferSize(s int) DialOption { // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // -// The default value for this buffer is 32KB. Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. +// The default value for this buffer is 32KB. 
Zero or negative values will +// disable read buffer for a connection so the data framer can access the +// underlying conn directly. func WithReadBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.ReadBufferSize = s @@ -198,25 +235,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig -// instead. Will be removed in a future 1.x release. -func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // @@ -230,15 +248,14 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption { }) } -// WithConnectParams configures the dialer to use the provided ConnectParams. +// WithConnectParams configures the ClientConn to use the provided ConnectParams +// for creating and maintaining connections to servers. // // The backoff configuration specified as part of the ConnectParams overrides // all defaults specified in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider // using the backoff.DefaultConfig as a base, in cases where you want to // override only a subset of the backoff configuration. -// -// This API is EXPERIMENTAL. func WithConnectParams(p ConnectParams) DialOption { return newFuncDialOption(func(o *dialOptions) { o.bs = internalbackoff.Exponential{Config: p.Backoff} @@ -276,9 +293,12 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { }) } -// WithBlock returns a DialOption which makes caller of Dial blocks until the +// WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting to the server happens in the background. +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md func WithBlock() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -290,7 +310,13 @@ func WithBlock() DialOption { // the context.DeadlineExceeded error. // Implies WithBlock() // -// This API is EXPERIMENTAL. +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func WithReturnConnectionError() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -299,21 +325,30 @@ func WithReturnConnectionError() DialOption { } // WithInsecure returns a DialOption which disables transport security for this -// ClientConn. Note that transport security is required unless WithInsecure is -// set. +// ClientConn. Under the hood, it uses insecure.NewCredentials().
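The WithInsecure deprecation rationale continues below; in practice, the migration it calls for looks like this hedged usage sketch. The target address is a placeholder, and both forms remain supported throughout 1.x.

    package main

    import (
    	"log"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    )

    func main() {
    	// Deprecated form (still works throughout 1.x):
    	//   conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
    	//
    	// Preferred form: transport security is disabled explicitly via
    	// credentials, making the (in)security choice visible at the call site.
    	conn, err := grpc.Dial("localhost:50051",
    		grpc.WithTransportCredentials(insecure.NewCredentials()),
    	)
    	if err != nil {
    		log.Fatalf("dial: %v", err)
    	}
    	defer conn.Close()
    }

Routing the insecure case through a TransportCredentials implementation is what lets the rest of this patch drop the separate `insecure bool` field from dialOptions: every channel now carries credentials, secure or not.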
+// +// Note that using this DialOption with per-RPC credentials (through +// WithCredentialsBundle or WithPerRPCCredentials) which require transport +// security is incompatible and will cause grpc.Dial() to fail. +// +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { - o.insecure = true + o.copts.TransportCredentials = insecure.NewCredentials() }) } // WithNoProxy returns a DialOption which disables the use of proxies for this // ClientConn. This is ignored if WithDialer or WithContextDialer are used. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func WithNoProxy() DialOption { return newFuncDialOption(func(o *dialOptions) { - o.withProxy = false + o.copts.UseProxy = false }) } @@ -338,7 +373,10 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { // the ClientConn.WithCreds. This should not be used together with // WithTransportCredentials. // -// This API is experimental. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func WithCredentialsBundle(b credentials.Bundle) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.CredsBundle = b @@ -391,7 +429,21 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { // all the RPCs and underlying network connections in this ClientConn. func WithStatsHandler(h stats.Handler) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.StatsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) + }) +} + +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. +func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl }) } @@ -403,7 +455,13 @@ func WithStatsHandler(h stats.Handler) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // -// This is an EXPERIMENTAL API. +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func FailOnNonTempDialError(f bool) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.FailOnNonTempDialError = f @@ -469,8 +527,7 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt } // WithAuthority returns a DialOption that specifies the value to be used as the -// :authority pseudo-header. This value only works with WithInsecure and has no -// effect if TransportCredentials are present. +// :authority pseudo-header and as the server name in authentication handshake. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a @@ -481,8 +538,11 @@ func WithAuthority(a string) DialOption { // current ClientConn's parent. This function is used in nested channel creation // (e.g. grpclb dial). // -// This API is EXPERIMENTAL. 
-func WithChannelzParentID(id int64) DialOption { +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithChannelzParentID(id *channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id }) @@ -503,11 +563,16 @@ func WithDisableServiceConfig() DialOption { // WithDefaultServiceConfig returns a DialOption that configures the default // service config, which will be used in cases where: // -// 1. WithDisableServiceConfig is also used. -// 2. Resolver does not return a service config or if the resolver returns an -// invalid service config. +// 1. WithDisableServiceConfig is also used, or +// +// 2. The name resolver does not provide a service config or provides an +// invalid service config. // -// This API is EXPERIMENTAL. +// The parameter s is the JSON representation of the default service config. +// For more information about service configs, see: +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// For a simple example of usage, see: +// examples/features/load_balancing/client/main.go func WithDefaultServiceConfig(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.defaultServiceConfigRawJSON = &s @@ -518,12 +583,6 @@ func WithDefaultServiceConfig(s string) DialOption { // service config enables them. This does not impact transparent retries, which // will happen automatically if no data is written to the wire or if the RPC is // unprocessed by the remote server. -// -// Retry support is currently disabled by default, but will be enabled by -// default in the future. Until then, it may be enabled by setting the -// environment variable "GRPC_GO_RETRY" to "on". -// -// This API is EXPERIMENTAL. func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true @@ -541,7 +600,10 @@ func WithMaxHeaderListSize(s uint32) DialOption { // WithDisableHealthCheck disables the LB channel health checking for all // SubConns of this ClientConn. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func WithDisableHealthCheck() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableHealthCheck = true @@ -560,14 +622,12 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - disableRetry: !envconfig.Retry, healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, + UseProxy: true, }, - resolveNowBackoff: internalbackoff.DefaultExponential.Backoff, - withProxy: true, } } @@ -582,24 +642,37 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { }) } -// withResolveNowBackoff specifies the function that clientconn uses to backoff -// between successive calls to resolver.ResolveNow(). -// -// For testing purpose only. -func withResolveNowBackoff(f func(int) time.Duration) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.resolveNowBackoff = f - }) -} - // WithResolvers allows a list of resolver implementations to be registered // locally with the ClientConn without needing to be globally registered via // resolver.Register. They will be matched against the scheme used for the // current Dial only, and will take precedence over the global registry. // -// This API is EXPERIMENTAL. 
+// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func WithResolvers(rs ...resolver.Builder) DialOption { return newFuncDialOption(func(o *dialOptions) { o.resolvers = append(o.resolvers, rs...) }) } + +// WithIdleTimeout returns a DialOption that configures an idle timeout for the +// channel. If the channel is idle for the configured timeout, i.e. there are no +// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode +// and as a result the name resolver and load balancer will be shut down. The +// channel will exit idle mode when the Connect() method is called or when an +// RPC is initiated. +// +// By default this feature is disabled, which can also be explicitly configured +// by passing zero to this function. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithIdleTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.idleTimeout = d + }) +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/encoding/encoding.go b/prometheus-to-sd/vendor/google.golang.org/grpc/encoding/encoding.go index 195e8448b..07a586135 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/encoding/encoding.go @@ -19,12 +19,17 @@ // Package encoding defines the interface for the compressor and codec, and // functions to register and retrieve compressors and codecs. // -// This package is EXPERIMENTAL. +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. package encoding import ( "io" "strings" + + "google.golang.org/grpc/internal/grpcutil" ) // Identity specifies the optional encoding for uncompressed streams. @@ -46,10 +51,15 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string - // EXPERIMENTAL: if a Compressor implements + // If a Compressor implements + // DecompressedSize(compressedBytes []byte) int, gRPC will call it // to determine the size of the buffer allocated for the result of decompression. // Return -1 to indicate unknown size. + // + // Experimental + // + // Notice: This API is EXPERIMENTAL and may be changed or removed in a + // later release. } var registeredCompressor = make(map[string]Compressor) @@ -65,6 +75,9 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c + if !grpcutil.IsCompressorNameRegistered(c.Name()) { + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + } } // GetCompressor returns Compressor for the given compressor name. @@ -100,7 +113,7 @@ var registeredCodecs = make(map[string]Codec) // more details. // // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. If multiple Codecs are // registered with the same name, the one registered last will take effect.
func RegisterCodec(codec Codec) { if codec == nil { diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/encoding/proto/proto.go b/prometheus-to-sd/vendor/google.golang.org/grpc/encoding/proto/proto.go index 66b97a6f6..3009b35af 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -21,8 +21,7 @@ package proto import ( - "math" - "sync" + "fmt" "github.com/golang/protobuf/proto" "google.golang.org/grpc/encoding" @@ -38,73 +37,22 @@ func init() { // codec is a Codec implementation with protobuf. It is the default codec for gRPC. type codec struct{} -type cachedProtoBuffer struct { - lastMarshaledSize uint32 - proto.Buffer -} - -func capToMaxInt32(val int) uint32 { - if val > math.MaxInt32 { - return uint32(math.MaxInt32) - } - return uint32(val) -} - -func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { - protoMsg := v.(proto.Message) - newSlice := make([]byte, 0, cb.lastMarshaledSize) - - cb.SetBuf(newSlice) - cb.Reset() - if err := cb.Marshal(protoMsg); err != nil { - return nil, err - } - out := cb.Bytes() - cb.lastMarshaledSize = capToMaxInt32(len(out)) - return out, nil -} - func (codec) Marshal(v interface{}) ([]byte, error) { - if pm, ok := v.(proto.Marshaler); ok { - // object can marshal itself, no need for buffer - return pm.Marshal() + vv, ok := v.(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) } - - cb := protoBufferPool.Get().(*cachedProtoBuffer) - out, err := marshal(v, cb) - - // put back buffer and lose the ref to the slice - cb.SetBuf(nil) - protoBufferPool.Put(cb) - return out, err + return proto.Marshal(vv) } func (codec) Unmarshal(data []byte, v interface{}) error { - protoMsg := v.(proto.Message) - protoMsg.Reset() - - if pu, ok := protoMsg.(proto.Unmarshaler); ok { - // object can unmarshal itself, no need for buffer - return pu.Unmarshal(data) + vv, ok := v.(proto.Message) + if !ok { + return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } - - cb := protoBufferPool.Get().(*cachedProtoBuffer) - cb.SetBuf(data) - err := cb.Unmarshal(protoMsg) - cb.SetBuf(nil) - protoBufferPool.Put(cb) - return err + return proto.Unmarshal(data, vv) } func (codec) Name() string { return Name } - -var protoBufferPool = &sync.Pool{ - New: func() interface{} { - return &cachedProtoBuffer{ - Buffer: proto.Buffer{}, - lastMarshaledSize: 16, - } - }, -} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/grpclog/component.go b/prometheus-to-sd/vendor/google.golang.org/grpc/grpclog/component.go index b513281a3..8358dd6e2 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/grpclog/component.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/grpclog/component.go @@ -100,7 +100,7 @@ func (c *componentData) Fatalln(args ...interface{}) { } func (c *componentData) V(l int) bool { - return grpclog.Logger.V(l) + return V(l) } // Component creates a new component and returns it for logging. 
If a component diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/prometheus-to-sd/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 8eba2d0e0..5de66e40d 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,11 +19,13 @@ package grpclog import ( + "encoding/json" + "fmt" "io" - "io/ioutil" "log" "os" "strconv" + "strings" "google.golang.org/grpc/internal/grpclog" ) @@ -95,8 +97,9 @@ var severityName = []string{ // loggerT is the default logger used by grpclog. type loggerT struct { - m []*log.Logger - v int + m []*log.Logger + v int + jsonFormat bool } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -105,27 +108,40 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) +} + +type loggerV2Config struct { + verbose int + jsonFormat bool +} + +func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { var m []*log.Logger - m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + flag := log.LstdFlags + if c.jsonFormat { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) - m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) - return &loggerT{m: m, v: v} + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} } // newLoggerV2 creates a loggerV2 to be used as default logger. // All logs are written to stderr. func newLoggerV2() LoggerV2 { - errorW := ioutil.Discard - warningW := ioutil.Discard - infoW := ioutil.Discard + errorW := io.Discard + warningW := io.Discard + infoW := io.Discard logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") switch logLevel { @@ -142,58 +158,79 @@ func newLoggerV2() LoggerV2 { if vl, err := strconv.Atoi(vLevel); err == nil { v = vl } - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) + + jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") + + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ + verbose: v, + jsonFormat: jsonFormat, + }) +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) } func (g *loggerT) Info(args ...interface{}) { - g.m[infoLog].Print(args...) 
+ g.output(infoLog, fmt.Sprint(args...)) } func (g *loggerT) Infoln(args ...interface{}) { - g.m[infoLog].Println(args...) + g.output(infoLog, fmt.Sprintln(args...)) } func (g *loggerT) Infof(format string, args ...interface{}) { - g.m[infoLog].Printf(format, args...) + g.output(infoLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Warning(args ...interface{}) { - g.m[warningLog].Print(args...) + g.output(warningLog, fmt.Sprint(args...)) } func (g *loggerT) Warningln(args ...interface{}) { - g.m[warningLog].Println(args...) + g.output(warningLog, fmt.Sprintln(args...)) } func (g *loggerT) Warningf(format string, args ...interface{}) { - g.m[warningLog].Printf(format, args...) + g.output(warningLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Error(args ...interface{}) { - g.m[errorLog].Print(args...) + g.output(errorLog, fmt.Sprint(args...)) } func (g *loggerT) Errorln(args ...interface{}) { - g.m[errorLog].Println(args...) + g.output(errorLog, fmt.Sprintln(args...)) } func (g *loggerT) Errorf(format string, args ...interface{}) { - g.m[errorLog].Printf(format, args...) + g.output(errorLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Fatal(args ...interface{}) { - g.m[fatalLog].Fatal(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) } func (g *loggerT) Fatalln(args ...interface{}) { - g.m[fatalLog].Fatalln(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) } func (g *loggerT) Fatalf(format string, args ...interface{}) { - g.m[fatalLog].Fatalf(format, args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) } func (g *loggerT) V(l int) bool { @@ -204,15 +241,18 @@ func (g *loggerT) V(l int) bool { // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type DepthLoggerV2 interface { LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
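To see what the new jsonFormat path emits, a sketch written as if inside package grpclog (newLoggerV2WithConfig and loggerV2Config are unexported; applications instead set GRPC_GO_LOG_FORMATTER=json in the environment before the process starts):

func ExampleJSONFormat() {
    // jsonFormat zeroes the log flags and replaces the "INFO: " style prefix
    // with one JSON object per entry, marshaled in output() above.
    l := newLoggerV2WithConfig(os.Stdout, os.Stdout, os.Stdout, loggerV2Config{jsonFormat: true})
    l.Info("hello")
    // Prints a line like: {"message":"hello","severity":"INFO"}
}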
FatalDepth(depth int, args ...interface{}) } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/idle.go b/prometheus-to-sd/vendor/google.golang.org/grpc/idle.go new file mode 100644 index 000000000..dc3dc72f6 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/idle.go @@ -0,0 +1,287 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type idlenessEnforcer interface { + exitIdleMode() error + enterIdleMode() error +} + +// idlenessManager defines the functionality required to track RPC activity on a +// channel. +type idlenessManager interface { + onCallBegin() error + onCallEnd() + close() +} + +type noopIdlenessManager struct{} + +func (noopIdlenessManager) onCallBegin() error { return nil } +func (noopIdlenessManager) onCallEnd() {} +func (noopIdlenessManager) close() {} + +// idlenessManagerImpl implements the idlenessManager interface. It uses atomic +// operations to synchronize access to shared state and a mutex to guarantee +// mutual exclusion in a critical section. +type idlenessManagerImpl struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. + timeout int64 // Idle timeout duration nanos stored as an int64. + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and onCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// newIdlenessManager creates a new idleness manager implementation for the +// given idle timeout. 
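Because all of these types are unexported, the contract is easiest to show from inside package grpc; a test-style sketch (illustrative only, names are hypothetical):

// fakeEnforcer stands in for the grpc.ClientConn side of the contract: the
// manager calls enterIdleMode() from its timer callback when the channel has
// been inactive, and exitIdleMode() when an RPC arrives on an idle channel.
type fakeEnforcer struct{}

func (fakeEnforcer) enterIdleMode() error { fmt.Println("entering idle"); return nil }
func (fakeEnforcer) exitIdleMode() error  { fmt.Println("exiting idle"); return nil }

func ExampleIdlenessManager() {
    m := newIdlenessManager(fakeEnforcer{}, 5*time.Minute)
    defer m.close()
    if err := m.onCallBegin(); err != nil { // exits idle mode first if needed
        return
    }
    m.onCallEnd() // records lastCallEndTime and decrements the active count
}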
+func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { + if idleTimeout == 0 { + return noopIdlenessManager{} + } + + i := &idlenessManagerImpl{ + enforcer: enforcer, + timeout: int64(idleTimeout), + } + i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) + return i +} + +// resetIdleTimer resets the idle timer to the given duration. This method +// should only be called from the timer callback. +func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if i.timer == nil { + // Only close sets timer to nil. We are done. + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback, which means the timer has + // already fired. + i.timer.Reset(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. +func (i *idlenessManagerImpl) handleIdleTimeout() { + if i.isClosed() { + return + } + + if atomic.LoadInt32(&i.activeCallsCount) > 0 { + i.resetIdleTimer(time.Duration(i.timeout)) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. + if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. + atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) + i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) + return + } + + // This CAS operation is extremely likely to succeed given that there has + // been no activity since the last time we were here. Setting the + // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the + // channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity at the top of this method, or one was ongoing from before + // the last time we were here. In both cases, reset the timer and return. + i.resetIdleTimer(time.Duration(i.timeout)) + return + } + + // Now that we've set the active calls count to -math.MaxInt32, it's time to + // actually move to idle mode. + if i.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) + i.resetIdleTimer(time.Duration(i.timeout)) +} + +// tryEnterIdleMode instructs the channel to enter idle mode. But before +// that, it performs a last-minute check to ensure that no new RPC has come in, +// making the channel active. +// +// Return value indicates whether or not the channel moved to idle mode. +// +// Holds idleMu which ensures mutual exclusion with exitIdleMode. +func (i *idlenessManagerImpl) tryEnterIdleMode() bool { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle.
+ return false + } + if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + // A very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. + return false + } + + // No new RPCs have come in since we last set the active calls count to + // -math.MaxInt32 in the timer callback. And since we have the lock, it is + // safe to enter idle mode now. + if err := i.enforcer.enterIdleMode(); err != nil { + logger.Errorf("Failed to enter idle mode: %v", err) + return false + } + + // Successfully entered idle mode. + i.actuallyIdle = true + return true +} + +// onCallBegin is invoked at the start of every RPC. +func (i *idlenessManagerImpl) onCallBegin() error { + if i.isClosed() { + return nil + } + + if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. + atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. + if err := i.exitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. + atomic.AddInt32(&i.activeCallsCount, -1) + return err + } + + atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + return nil +} + +// exitIdleMode instructs the channel to exit idle mode. +// +// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. +func (i *idlenessManagerImpl) exitIdleMode() error { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if !i.actuallyIdle { + // This can happen in two scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC + // came in and onCallBegin() noticed that the calls count is negative. + // - Channel is in idle mode, and multiple new RPCs come in at the same + // time, all of them notice a negative calls count in onCallBegin and get + // here. The first one to get the lock would get the channel to exit idle. + // + // Either way, nothing to do here. + return nil + } + + if err := i.enforcer.exitIdleMode(); err != nil { + return fmt.Errorf("channel failed to exit idle mode: %v", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. + atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) + i.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. + i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) + return nil +} + +// onCallEnd is invoked at the end of every RPC. +func (i *idlenessManagerImpl) onCallEnd() { + if i.isClosed() { + return + } + + // Record the time at which the most recent call finished. + atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode.
+ atomic.AddInt32(&i.activeCallsCount, -1) +} + +func (i *idlenessManagerImpl) isClosed() bool { + return atomic.LoadInt32(&i.closed) == 1 +} + +func (i *idlenessManagerImpl) close() { + atomic.StoreInt32(&i.closed, 1) + + i.idleMu.Lock() + i.timer.Stop() + i.timer = nil + i.idleMu.Unlock() +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/install_gae.sh b/prometheus-to-sd/vendor/google.golang.org/grpc/install_gae.sh deleted file mode 100644 index 7c7bcada5..000000000 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/install_gae.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -TMP=$(mktemp -d /tmp/sdk.XXX) \ -&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ -&& unzip -q $TMP.zip -d $TMP \ -&& export PATH="$PATH:$TMP/go_appengine" diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/interceptor.go b/prometheus-to-sd/vendor/google.golang.org/grpc/interceptor.go index 8b7350022..bb96ef57b 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/interceptor.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/interceptor.go @@ -25,17 +25,41 @@ import ( // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error -// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC -// and it is the responsibility of the interceptor to call it. -// This is an EXPERIMENTAL API. +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. +// Unary interceptors can be specified as a DialOption, using +// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a +// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC +// delegates all unary RPC invocations to the interceptor, and it is the +// responsibility of the interceptor to call invoker to complete the processing +// of the RPC. +// +// method is the RPC name. req and reply are the corresponding request and +// response messages. cc is the ClientConn on which the RPC was invoked. invoker +// is the handler to complete the RPC and it is the responsibility of the +// interceptor to call it. opts contain all applicable call options, including +// defaults from the ClientConn as well as per-call options. +// +// The returned error must be compatible with the status package. type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) -// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O -// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. -// This is an EXPERIMENTAL API. +// StreamClientInterceptor intercepts the creation of a ClientStream. Stream +// interceptors can be specified as a DialOption, using WithStreamInterceptor() +// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream +// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations +// to the interceptor, and it is the responsibility of the interceptor to call +// streamer. 
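To make the unary contract above concrete, a small sketch (placeholder names, not part of this patch): the interceptor itself must call invoker to complete the RPC, and its error should be status-compatible.

package example

import (
    "context"
    "log"
    "time"

    "google.golang.org/grpc"
)

// timingUnary logs the latency of every unary RPC and returns the invoker's
// error unchanged.
func timingUnary(ctx context.Context, method string, req, reply interface{},
    cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
    start := time.Now()
    err := invoker(ctx, method, req, reply, cc, opts...) // complete the RPC
    log.Printf("method=%s dur=%v err=%v", method, time.Since(start), err)
    return err
}

// Installed through the DialOptions named above, e.g.:
//   conn, err := grpc.Dial(target, grpc.WithChainUnaryInterceptor(timingUnary))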
+// +// desc contains a description of the stream. cc is the ClientConn on which the +// RPC was invoked. streamer is the handler to create a ClientStream and it is +// the responsibility of the interceptor to call it. opts contain all applicable +// call options, including defaults from the ClientConn as well as per-call +// options. +// +// StreamClientInterceptor may return a custom ClientStream to intercept all I/O +// operations. The returned error must be compatible with the status package. type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) // UnaryServerInfo consists of various information about a unary RPC on @@ -48,9 +72,12 @@ type UnaryServerInfo struct { } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 000000000..08666f62a --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,384 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. 
mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. +func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. +func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. 
+ bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return errBalancerClosed + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + gsb.mu.Lock() + defer gsb.mu.Unlock() + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + return + } + balToUpdate.UpdateSubConnState(sc, state) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. 
+// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/remove +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if state.ConnectivityState == connectivity.Shutdown { + bw.gsb.mu.Lock() + delete(bw.subconns, sc) + bw.gsb.mu.Unlock() + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. bw +// must not be referenced via balancerCurrent or balancerPending in gsb when +// called. gsb.mu must not be held. Does not panic with a nil receiver. +func (bw *balancerWrapper) Close() { + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() is + // impossible to be called concurrently with the write in SwitchTo(). The + // callsites of Close() for this balancer in Graceful Switch Balancer will + // never be called until SwitchTo() returns. + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + bw.gsb.cc.RemoveSubConn(sc) + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. + bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, you can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the Client Conn. Ignoring + // state + picker from the current would cause undefined behavior/cause the + // system to behave incorrectly from the current LB policy's perspective. + // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends.
+ if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + bw.gsb.cc.RemoveSubConn(sc) + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. + if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.RemoveSubConn(sc) +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 5cc3aeddb..755fdebc1 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -28,38 +28,48 @@ import ( "google.golang.org/grpc/internal/grpcutil" ) -// Logger is the global binary logger. It can be used to get binary logger for -// each method. +var grpclogLogger = grpclog.Component("binarylog") + +// Logger specifies MethodLoggers for method names with a Log call that +// takes a context. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. One of these should be // built at init time from the configuration (environment variable or flags). // -// It is used to get a methodLogger for each individual method. +// It is used to get a MethodLogger for each individual method. var binLogger Logger -var grpclogLogger = grpclog.Component("binarylog") - -// SetLogger sets the binarg logger. +// SetLogger sets the binary logger. // // Only call this at init time. func SetLogger(l Logger) { binLogger = l } -// GetMethodLogger returns the methodLogger for the given methodName. +// GetLogger gets the binary logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + +// GetMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method".
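Stepping back, the intended call pattern for the graceful switch utility above is roughly as follows (it lives under internal/, so this is a sketch of in-tree usage rather than public API; it assumes the round_robin policy has been registered):

// Inside a parent LB policy that holds a balancer.ClientConn cc:
gsb := gracefulswitch.NewBalancer(cc, balancer.BuildOptions{})

// On each config change, build the new child. The old child keeps serving
// picks until the pending one leaves CONNECTING, at which point swap() runs.
if err := gsb.SwitchTo(balancer.Get("round_robin")); err != nil {
    // errBalancerClosed once Close() has been called.
}

// Resolver and SubConn updates are simply forwarded to the latest child:
_ = gsb.UpdateClientConnState(balancer.ClientConnState{})
gsb.Close()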
// -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -68,17 +78,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. The map fields need to be filled in @@ -88,83 +110,83 @@ func newEmptyLogger() *logger { } // Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // -// New methodLogger with same service overrides the old one. -func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { +// New MethodLogger with same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // -// New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { +// New MethodLogger with same method overrides the old one. 
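With these config types now exported, an in-tree caller (the package is still under internal/) could assemble a logger programmatically; a sketch using the fields defined above:

cfg := binarylog.LoggerConfig{
    // Default for every method: keep up to 1 KiB of headers and 4 KiB of
    // each message.
    All: &binarylog.MethodLoggerConfig{Header: 1024, Message: 4096},
    // Never log health checks.
    Blacklist: map[string]struct{}{"grpc.health.v1.Health/Check": {}},
}
binarylog.SetLogger(binarylog.NewLoggerFromConfig(cfg))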
+func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } -// getMethodLogger returns the methodLogger for the given methodName. +// getMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func (l *logger) getMethodLogger(methodName string) *MethodLogger { +func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index d8f4e7602..f9e80e27a 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -30,15 +30,15 @@ import ( // to build a new logger and assign it to binarylog.Logger. // // Example filter config strings: -// - "" Nothing will be logged -// - "*" All headers and messages will be fully logged. -// - "*{h}" Only headers will be logged. -// - "*{m:256}" Only the first 256 bytes of each message will be logged. 
-// - "Foo/*" Logs every method in service Foo -// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar -// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method -// /Foo/Bar, logs all headers and messages in every other method in service -// Foo. +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. // // If two configs exist for one certain method or service, the one specified // later overrides the previous config. @@ -57,7 +57,7 @@ func NewLoggerFromConfigString(s string) Logger { return l } -// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds +// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds // it to the right map in the logger. func (l *logger) fillMethodLoggerWithConfigString(config string) error { // "" is invalid. @@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil @@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 5e1083539..6c3f63221 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -19,6 +19,7 @@ package binarylog import ( + "context" "net" "strings" "sync/atomic" @@ -26,7 +27,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -48,7 +49,16 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -type MethodLogger struct { +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. 
+type MethodLogger interface { + Log(context.Context, LogEntryConfig) +} + +// TruncatingMethodLogger is a method logger that truncates headers and messages +// based on configured fields. +type TruncatingMethodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -57,20 +67,26 @@ type MethodLogger struct { sink Sink // TODO(blog): make this pluggable. } -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ +// NewTruncatingMethodLogger returns a new truncating method logger. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. +func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { + return &TruncatingMethodLogger{ headerMaxLen: h, messageMaxLen: m, callID: idGen.next(), idWithinCallGen: &callIDGenerator{}, - sink: defaultSink, // TODO(blog): make it pluggable. + sink: DefaultSink, // TODO(blog): make it pluggable. } } -// Log creates a proto binary log entry, and logs it to the sink. -func (ml *MethodLogger) Log(c LogEntryConfig) { +// Build is an internal-only method for building the proto message out of the +// input event. It's made public to enable other libraries to reuse as much logic +// in TruncatingMethodLogger as possible. +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -78,18 +94,22 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { m.SequenceIdWithinCall = ml.idWithinCallGen.next() switch pay := m.Payload.(type) { - case *pb.GrpcLogEntry_ClientHeader: + case *binlogpb.GrpcLogEntry_ClientHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) - case *pb.GrpcLogEntry_ServerHeader: + case *binlogpb.GrpcLogEntry_ServerHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) - case *pb.GrpcLogEntry_Message: + case *binlogpb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -108,7 +128,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { // but not counted towards the size limit. continue } - currentEntryLen := uint64(len(entry.Value)) + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) if currentEntryLen > bytesLimit { break } @@ -119,7 +139,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } @@ -131,8 +151,11 @@ func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { } // LogEntryConfig represents the configuration for binary log entry. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed.
type LogEntryConfig interface { - toProto() *pb.GrpcLogEntry + toProto() *binlogpb.GrpcLogEntry } // ClientHeader configs the binary log entry to be a ClientHeader entry. @@ -146,10 +169,10 @@ type ClientHeader struct { PeerAddr net.Addr } -func (c *ClientHeader) toProto() *pb.GrpcLogEntry { +func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { // This function doesn't need to set all the fields (e.g. seq ID). The Log // function will set the fields when necessary. - clientHeader := &pb.ClientHeader{ + clientHeader := &binlogpb.ClientHeader{ Metadata: mdToMetadataProto(c.Header), MethodName: c.MethodName, Authority: c.Authority, @@ -157,16 +180,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry { if c.Timeout > 0 { clientHeader.Timeout = ptypes.DurationProto(c.Timeout) } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Payload: &pb.GrpcLogEntry_ClientHeader{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ ClientHeader: clientHeader, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -182,19 +205,19 @@ type ServerHeader struct { PeerAddr net.Addr } -func (c *ServerHeader) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ +func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ Metadata: mdToMetadataProto(c.Header), }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -210,7 +233,7 @@ type ClientMessage struct { Message interface{} } -func (c *ClientMessage) toProto() *pb.GrpcLogEntry { +func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -225,19 +248,19 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -250,7 +273,7 @@ type ServerMessage struct { Message interface{} } -func (c *ServerMessage) toProto() *pb.GrpcLogEntry { +func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -265,19 +288,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor 
[]byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -287,15 +310,15 @@ type ClientHalfClose struct { OnClientSide bool } -func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, +func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Payload: nil, // No payload here. } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -311,7 +334,7 @@ type ServerTrailer struct { PeerAddr net.Addr } -func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { +func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { grpclogLogger.Info("binarylogging: error in trailer is not a status error") @@ -327,10 +350,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ Metadata: mdToMetadataProto(c.Trailer), StatusCode: uint32(st.Code()), StatusMessage: st.Message(), @@ -339,9 +362,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -354,15 +377,15 @@ type Cancel struct { OnClientSide bool } -func (c *Cancel) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, +func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, Payload: nil, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -379,15 +402,15 @@ func metadataKeyOmit(key string) bool { return strings.HasPrefix(key, "grpc-") } -func mdToMetadataProto(md metadata.MD) *pb.Metadata { - ret := &pb.Metadata{} +func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { + ret := &binlogpb.Metadata{} for k, vv := range md { if metadataKeyOmit(k) { continue } for _, v := range vv { ret.Entry = append(ret.Entry, - &pb.MetadataEntry{ + &binlogpb.MetadataEntry{ Key: k, Value: []byte(v), }, @@ -397,26 +420,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata 
{ return ret } -func addrToProto(addr net.Addr) *pb.Address { - ret := &pb.Address{} +func addrToProto(addr net.Addr) *binlogpb.Address { + ret := &binlogpb.Address{} switch a := addr.(type) { case *net.TCPAddr: if a.IP.To4() != nil { - ret.Type = pb.Address_TYPE_IPV4 + ret.Type = binlogpb.Address_TYPE_IPV4 } else if a.IP.To16() != nil { - ret.Type = pb.Address_TYPE_IPV6 + ret.Type = binlogpb.Address_TYPE_IPV6 } else { - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN // Do not set address and port fields. break } ret.Address = a.IP.String() ret.IpPort = uint32(a.Port) case *net.UnixAddr: - ret.Type = pb.Address_TYPE_UNIX + ret.Type = binlogpb.Address_TYPE_UNIX ret.Address = a.String() default: - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN } return ret } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/sink.go index 835f51040..264de387c 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -21,44 +21,36 @@ package binarylog import ( "bufio" "encoding/binary" - "fmt" "io" - "io/ioutil" "sync" "time" "github.com/golang/protobuf/proto" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" ) var ( - defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). + // DefaultSink is the sink where the logs will be written to. It's exported + // for the binarylog package to update. + DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). ) -// SetDefaultSink sets the sink where binary logs will be written to. -// -// Not thread safe. Only set during initialization. -func SetDefaultSink(s Sink) { - if defaultSink != nil { - defaultSink.Close() - } - defaultSink = s -} - // Sink writes log entry into the binary log sink. +// +// sink is a copy of the exported binarylog.Sink, to avoid circular dependency. type Sink interface { // Write will be called to write the log entry into the sink. // // It should be thread-safe so it can be called in parallel. - Write(*pb.GrpcLogEntry) error + Write(*binlogpb.GrpcLogEntry) error // Close will be called when the Sink is replaced by a new Sink. Close() error } type noopSink struct{} -func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } -func (ns *noopSink) Close() error { return nil } +func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } // newWriterSink creates a binary log sink with the given writer. // @@ -66,7 +58,7 @@ func (ns *noopSink) Close() error { return nil } // message is prefixed with a 4 byte big endian unsigned integer as the length. // // No buffer is done, Close() doesn't try to close the writer. 
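Editor's note: given the framing described above (each marshaled entry prefixed with a 4-byte big-endian length), a consumer of the sink's output can be sketched as below. This reader is an illustration, not part of the patch, and treats each payload as opaque bytes rather than unmarshaling the GrpcLogEntry proto:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readFramed reads length-prefixed records in the writerSink format:
// a 4-byte big-endian length followed by that many payload bytes.
func readFramed(r io.Reader) ([][]byte, error) {
	var records [][]byte
	hdr := make([]byte, 4)
	for {
		if _, err := io.ReadFull(r, hdr); err != nil {
			if err == io.EOF {
				return records, nil // clean end of stream
			}
			return records, err // includes io.ErrUnexpectedEOF on a truncated header
		}
		payload := make([]byte, binary.BigEndian.Uint32(hdr))
		if _, err := io.ReadFull(r, payload); err != nil {
			return records, err
		}
		records = append(records, payload)
	}
}

func main() {
	// Write two records in the same framing the sink uses.
	var buf bytes.Buffer
	for _, msg := range [][]byte{[]byte("first"), []byte("second")} {
		hdr := make([]byte, 4)
		binary.BigEndian.PutUint32(hdr, uint32(len(msg)))
		buf.Write(hdr)
		buf.Write(msg)
	}
	records, err := readFramed(&buf)
	fmt.Println(len(records), string(records[0]), err) // 2 first <nil>
}
```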
-func newWriterSink(w io.Writer) *writerSink { +func newWriterSink(w io.Writer) Sink { return &writerSink{out: w} } @@ -74,10 +66,11 @@ type writerSink struct { out io.Writer } -func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { +func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { - grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err) + grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) + return err } hdr := make([]byte, 4) binary.BigEndian.PutUint32(hdr, uint32(len(b))) @@ -92,25 +85,28 @@ func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { func (ws *writerSink) Close() error { return nil } -type bufWriteCloserSink struct { - mu sync.Mutex - closer io.Closer - out *writerSink // out is built on buf. - buf *bufio.Writer // buf is kept for flush. +type bufferedSink struct { + mu sync.Mutex + closer io.Closer + out Sink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + flusherStarted bool - writeStartOnce sync.Once - writeTicker *time.Ticker + writeTicker *time.Ticker + done chan struct{} } -func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error { - // Start the write loop when Write is called. - fs.writeStartOnce.Do(fs.startFlushGoroutine) +func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { fs.mu.Lock() + defer fs.mu.Unlock() + if !fs.flusherStarted { + // Start the write loop when Write is called. + fs.startFlushGoroutine() + fs.flusherStarted = true + } if err := fs.out.Write(e); err != nil { - fs.mu.Unlock() return err } - fs.mu.Unlock() return nil } @@ -118,44 +114,57 @@ const ( bufFlushDuration = 60 * time.Second ) -func (fs *bufWriteCloserSink) startFlushGoroutine() { +func (fs *bufferedSink) startFlushGoroutine() { fs.writeTicker = time.NewTicker(bufFlushDuration) go func() { - for range fs.writeTicker.C { + for { + select { + case <-fs.done: + return + case <-fs.writeTicker.C: + } fs.mu.Lock() - fs.buf.Flush() + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } fs.mu.Unlock() } }() } -func (fs *bufWriteCloserSink) Close() error { +func (fs *bufferedSink) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() if fs.writeTicker != nil { fs.writeTicker.Stop() } - fs.mu.Lock() - fs.buf.Flush() - fs.closer.Close() - fs.out.Close() - fs.mu.Unlock() + close(fs.done) + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + if err := fs.closer.Close(); err != nil { + grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err) + } + if err := fs.out.Close(); err != nil { + grpclogLogger.Warningf("failed to close the Sink: %v", err) + } return nil } -func newBufWriteCloserSink(o io.WriteCloser) Sink { +// NewBufferedSink creates a binary log sink with the given WriteCloser. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// Content is kept in a buffer, and is flushed every 60 seconds. +// +// Close closes the WriteCloser. +func NewBufferedSink(o io.WriteCloser) Sink { bufW := bufio.NewWriter(o) - return &bufWriteCloserSink{ + return &bufferedSink{ closer: o, out: newWriterSink(bufW), buf: bufW, + done: make(chan struct{}), } } - -// NewTempFileSink creates a temp file and returns a Sink that writes to this -// file. 
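Editor's note: the bufferedSink rewrite starts its flush goroutine lazily on first Write, flushes on a 60-second ticker, and tears the goroutine down through the new done channel in Close. Assuming the exported wrappers in the public google.golang.org/grpc/binarylog package (SetSink and NewBufferedSink, which front this internal type), wiring binary logs to a file could look like the sketch below; the file path is illustrative only:

```go
package main

import (
	"log"
	"os"

	"google.golang.org/grpc/binarylog"
)

func main() {
	// Hypothetical destination for the length-prefixed log entries.
	f, err := os.Create("/tmp/grpc_binarylog.bin")
	if err != nil {
		log.Fatal(err)
	}
	// NewBufferedSink buffers writes and flushes periodically; Close
	// flushes once more and closes the underlying file.
	sink := binarylog.NewBufferedSink(f)
	binarylog.SetSink(sink)
	defer sink.Close()

	// ... construct gRPC clients/servers with binary logging enabled
	// (e.g. via the GRPC_BINARY_LOG_FILTER environment variable) ...
}
```

Because flushing is ticker-driven, entries can lag up to a minute behind the Write call; Close forces a final flush.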
-func NewTempFileSink() (Sink, error) { - tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt") - if err != nil { - return nil, fmt.Errorf("failed to create temp file: %v", err) - } - return newBufWriteCloserSink(tempFile), nil -} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 9f6a0c120..81c2f5fd7 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -35,6 +35,7 @@ import "sync" // internal/transport/transport.go for an example of this. type Unbounded struct { c chan interface{} + closed bool mu sync.Mutex backlog []interface{} } @@ -47,16 +48,18 @@ func NewUnbounded() *Unbounded { // Put adds t to the unbounded buffer. func (b *Unbounded) Put(t interface{}) { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) == 0 { select { case b.c <- t: - b.mu.Unlock() return default: } } b.backlog = append(b.backlog, t) - b.mu.Unlock() } // Load sends the earliest buffered data, if any, onto the read channel @@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) { // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -72,7 +79,6 @@ func (b *Unbounded) Load() { default: } } - b.mu.Unlock() } // Get returns a read channel on which values added to the buffer, via Put(), @@ -80,6 +86,20 @@ func (b *Unbounded) Load() { // // Upon reading a value from this channel, users are expected to call Load() to // send the next buffered value onto the channel if there is any. +// +// If the unbounded buffer is closed, the read channel returned by this method +// is closed. func (b *Unbounded) Get() <-chan interface{} { return b.c } + +// Close closes the unbounded buffer. +func (b *Unbounded) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } + b.closed = true + close(b.c) +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 81d3dd33e..777cbcd79 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,6 +24,8 @@ package channelz import ( + "context" + "errors" "fmt" "sort" "sync" @@ -49,7 +51,8 @@ var ( // TurnOn turns on channelz data collection. func TurnOn() { if !IsOn() { - NewChannelzStorage() + db.set(newChannelMap()) + idGen.reset() atomic.StoreInt32(&curState, 1) } } @@ -94,46 +97,40 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorage initializes channelz data storage and id generator. +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. // -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. 
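Editor's note: buffer.Unbounded now tracks a closed flag — Put and Load become no-ops after Close, and Close closes the channel returned by Get. Exercised directly, the Put/Get/Load contract looks like the following (the package is internal to the grpc module, so this sketch only compiles in that context):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/buffer"
)

func main() {
	b := buffer.NewUnbounded()
	for i := 0; i < 3; i++ {
		b.Put(i) // never blocks; overflow is appended to the backlog slice
	}
	for i := 0; i < 3; i++ {
		v := <-b.Get()
		fmt.Println(v.(int))
		b.Load() // per the contract, move the next backlog item onto the channel
	}
	b.Close() // subsequent Put/Load are no-ops; Get()'s channel is closed
	_, ok := <-b.Get()
	fmt.Println(ok) // false
}
```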
-func NewChannelzStorage() (cleanup func() error) { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. This cleanup function helps with ensuring that tests +// don't mess up each other. +func NewChannelzStorageForTesting() (cleanup func() error) { + db.set(newChannelMap()) idGen.reset() + return func() error { - var err error cm := db.get() if cm == nil { return nil } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + cm.mu.RLock() + topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) + cm.mu.RUnlock() + + if err := ctx.Err(); err != nil { + return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) + } + if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { return nil } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) + <-ticker.C } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err } } @@ -188,54 +185,77 @@ func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. 
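Editor's note: the cleanup function now bounds its wait with a context deadline and polls on a ticker, instead of looping a fixed 1000 times with sleeps. That wait-for-condition shape is reusable; a generic sketch, where the predicate is a stand-in for the channelz emptiness check:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// awaitCondition polls cond every interval until it reports true or
// the timeout elapses, mirroring the channelz test-cleanup loop.
func awaitCondition(timeout, interval time.Duration, cond func() bool) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		if cond() {
			return nil
		}
		if err := ctx.Err(); err != nil {
			return fmt.Errorf("condition not met within %v: %w", timeout, err)
		}
		<-ticker.C
	}
}

func main() {
	start := time.Now()
	err := awaitCondition(time.Second, 10*time.Millisecond, func() bool {
		return time.Since(start) > 50*time.Millisecond
	})
	fmt.Println(err) // <nil>
}
```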
+func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { id := idGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, - pid: pid, + pid: parent, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - if pid == 0 { - db.get().addChannel(id, cn, true, pid, ref) - } else { - db.get().addChannel(id, cn, false, pid, ref) - } - return id + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. -func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a SubChannel's parent id cannot be 0") - return 0 +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") } id := idGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } + sc := &subChannel{ refName: ref, c: c, sockets: make(map[int64]string), id: id, - pid: pid, + pid: pid.Int(), trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - db.get().addSubChannel(id, sc, pid, ref) - return id + db.get().addSubChannel(id, sc, pid.Int()) + return newIdentifer(RefSubChannel, id, pid), nil } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(s Server, ref string) *Identifier { id := idGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } + svr := &server{ refName: ref, s: s, @@ -244,73 +264,92 @@ func RegisterServer(s Server, ref string) int64 { id: id, } db.get().addServer(id, svr) - return id + return newIdentifer(RefServer, id, nil) } // RegisterListenSocket registers the given listen socket s in channelz database // with ref as its reference name, and add it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this listen socket. -func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a ListenSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. 
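Editor's note: RegisterChannel draws an id from idGen.genID() before checking IsOn(), so identifiers stay unique whether or not the database is mutated. The generator itself is unexported; the following is a plausible reconstruction with sync/atomic, labeled as an assumption rather than the actual grpc type:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// idGenerator hands out process-unique, monotonically increasing ids.
// reset() matches the calls seen in TurnOn and
// NewChannelzStorageForTesting, giving tests a fresh id space.
type idGenerator struct {
	id int64
}

func (i *idGenerator) reset()       { atomic.StoreInt64(&i.id, 0) }
func (i *idGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) }

func main() {
	var gen idGenerator
	fmt.Println(gen.genID(), gen.genID(), gen.genID()) // 1 2 3
	gen.reset()
	fmt.Println(gen.genID()) // 1
}
```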
+func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be 0") } id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid, ref) - return id + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } + + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addListenSocket(id, ls, pid.Int()) + return newIdentifer(RefListenSocket, id, pid), nil } // RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent +// with ref as its reference name, and adds it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a NormalSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be 0") } id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid, ref) - return id + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } + + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addNormalSocket(id, ns, pid.Int()) + return newIdentifer(RefNormalSocket, id, pid), nil } -// RemoveEntry removes an entry with unique channelz trakcing id to be id from +// RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. -func RemoveEntry(id int64) { - db.get().removeEntry(id) +// +// If channelz is not turned ON, this function is a no-op. +func RemoveEntry(id *Identifier) { + if !IsOn() { + return + } + db.get().removeEntry(id.Int()) } -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe +// the event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. type TraceEventDesc struct { Desc string Severity Severity Parent *TraceEventDesc } -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { - for d := desc; d != nil; d = d.Parent { - switch d.Severity { - case CtUNKNOWN: - l.InfoDepth(depth+1, d.Desc) - case CtINFO: - l.InfoDepth(depth+1, d.Desc) - case CtWarning: - l.WarningDepth(depth+1, d.Desc) - case CtError: - l.ErrorDepth(depth+1, d.Desc) - } +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { + // Log only the trace description associated with the bottom most entity. 
+ switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, withParens(id)+desc.Desc) + case CtWarning: + l.WarningDepth(depth+1, withParens(id)+desc.Desc) + case CtError: + l.ErrorDepth(depth+1, withParens(id)+desc.Desc) } + if getMaxTraceEntry() == 0 { return } - db.get().traceEvent(id, desc) + if IsOn() { + db.get().traceEvent(id.Int(), desc) + } } // channelMap is the storage data structure for channelz. @@ -328,6 +367,17 @@ type channelMap struct { normalSockets map[int64]*normalSocket } +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + } +} + func (c *channelMap) addServer(id int64, s *server) { c.mu.Lock() s.cm = c @@ -335,7 +385,7 @@ func (c *channelMap) addServer(id int64, s *server) { c.mu.Unlock() } -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { c.mu.Lock() cn.cm = c cn.trace.cm = c @@ -348,7 +398,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in c.mu.Unlock() } -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { c.mu.Lock() sc.cm = c sc.trace.cm = c @@ -357,7 +407,7 @@ func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref stri c.mu.Unlock() } -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { c.mu.Lock() ls.cm = c c.listenSockets[id] = ls @@ -365,7 +415,7 @@ func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref c.mu.Unlock() } -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { c.mu.Lock() ns.cm = c c.normalSockets[id] = ns @@ -632,7 +682,7 @@ func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) if count == 0 { end = true } - var s []*SocketMetric + s := make([]*SocketMetric, 0, len(sks)) for _, ns := range sks { sm := &SocketMetric{} sm.SocketData = ns.s.ChannelzMetric() diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/id.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 000000000..c9a27acd3 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/logging.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/logging.go index e94039ee2..8e13a3d2c 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,77 +26,54 @@ import ( var logger = grpclog.Component("channelz") +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtINFO, - }) - } else { - l.InfoDepth(1, args...) - } +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtINFO, - }) - } else { - l.InfoDepth(1, msg) - } +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, args...) 
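Editor's note: newIdentifer builds the printable form by prefixing the parent's string, so nested entities render as a chain exactly as the String docs describe. A toy version with the channelz types pared down:

```go
package main

import "fmt"

type ident struct {
	typ string
	id  int64
	str string
}

// newIdent mirrors newIdentifer above: "<Type> #<id>", prefixed with
// the parent's own string when a parent exists.
func newIdent(typ string, id int64, parent *ident) *ident {
	str := fmt.Sprintf("%s #%d", typ, id)
	if parent != nil {
		str = fmt.Sprintf("%s %s", parent.str, str)
	}
	return &ident{typ: typ, id: id, str: str}
}

func main() {
	top := newIdent("Channel", 1, nil)
	sub := newIdent("SubChannel", 5, top)
	sock := newIdent("NormalSocket", 8, sub)
	fmt.Println(top.str)  // Channel #1
	fmt.Println(sub.str)  // Channel #1 SubChannel #5
	fmt.Println(sock.str) // Channel #1 SubChannel #5 NormalSocket #8
}
```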
- } +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, msg) - } +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) - } else { - l.ErrorDepth(1, args...) - } +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtError, - }) - } else { - l.ErrorDepth(1, msg) - } +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/types.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/types.go index 075dc7d16..7b2f350e2 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the channel itself from the channelz database. // The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. func (c *channel) deleteSelfIfReady() { if !c.deleteSelfFromTree() { return @@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the subchannel itself from the channelz database. // The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. 
delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. func (sc *subChannel) deleteSelfIfReady() { if !sc.deleteSelfFromTree() { return @@ -672,10 +672,10 @@ func (c *channelTrace) clear() { type Severity int const ( - // CtUNKNOWN indicates unknown severity of a trace event. - CtUNKNOWN Severity = iota - // CtINFO indicates info level severity of a trace event. - CtINFO + // CtUnknown indicates unknown severity of a trace event. + CtUnknown Severity = iota + // CtInfo indicates info level severity of a trace event. + CtInfo // CtWarning indicates warning level severity of a trace event. CtWarning // CtError indicates error level severity of a trace event. @@ -686,12 +686,33 @@ const ( type RefChannelType int const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota + RefChannel // RefSubChannel indicates the referenced entity is a SubChannel. RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket ) +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + func (c *channelTrace) dumpData() *ChannelTrace { c.mu.Lock() ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/types_linux.go index 692dd6181..1b1c4cce3 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go index 19c2fc521..8b06eed1a 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * @@ -37,6 +38,6 @@ type SocketOptionData struct { // Windows OS doesn't support Socket Option func (s *SocketOptionData) Getsockopt(fd uintptr) { once.Do(func() { - logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.") + logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index fdf409d55..8d194e44e 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -1,5 +1,3 @@ -// +build linux,!appengine - /* * * Copyright 2018 gRPC authors. 
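Editor's note: the RefChannelType enum gains a RefUnknown zero value and a map-backed String method so identifiers can print their entity kind. The idiom copied into self-contained form — note an unmapped value stringifies to the empty string, which the map lookup silently allows:

```go
package main

import "fmt"

type RefChannelType int

const (
	RefUnknown RefChannelType = iota // zero value: unknown entity
	RefChannel
	RefSubChannel
	RefServer
	RefListenSocket
	RefNormalSocket
)

var refChannelTypeToString = map[RefChannelType]string{
	RefUnknown:      "Unknown",
	RefChannel:      "Channel",
	RefSubChannel:   "SubChannel",
	RefServer:       "Server",
	RefListenSocket: "ListenSocket",
	RefNormalSocket: "NormalSocket",
}

func (r RefChannelType) String() string {
	return refChannelTypeToString[r]
}

func main() {
	fmt.Println(RefServer)          // Server
	fmt.Println(RefChannelType(42)) // empty string: unmapped value
}
```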
diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index 8864a0811..837ddc402 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/credentials.go new file mode 100644 index 000000000..32c9b5903 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -0,0 +1,49 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "context" +) + +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + +// NewRequestInfoContext creates a context with ri. +func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + +// RequestInfoFromContext extracts the RequestInfo from ctx. +func RequestInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(requestInfoKey{}) +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(clientHandshakeInfoKey{}) +} + +// NewClientHandshakeInfoContext creates a context with chi. +func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/go110.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/spiffe.go similarity index 67% rename from prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/go110.go rename to prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/spiffe.go index d55b52036..25ade6230 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/go110.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -1,5 +1,3 @@ -// +build go1.10 - /* * * Copyright 2020 gRPC authors. @@ -25,38 +23,50 @@ package credentials import ( "crypto/tls" + "crypto/x509" "net/url" "google.golang.org/grpc/grpclog" ) +var logger = grpclog.Component("credentials") + // SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format // is invalid, return nil with warning. 
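Editor's note: the new internal credentials file keys context values with unexported empty-struct types — the standard trick for collision-proof context keys — and stores them as interface{} to avoid circular imports. A free-standing sketch with a concrete, made-up RequestInfo stand-in:

```go
package main

import (
	"context"
	"fmt"
)

// requestInfoKey is an unexported struct type: no other package can
// construct the same key, so context values cannot collide.
type requestInfoKey struct{}

type requestInfo struct{ Method string }

func newRequestInfoContext(ctx context.Context, ri requestInfo) context.Context {
	return context.WithValue(ctx, requestInfoKey{}, ri)
}

func requestInfoFromContext(ctx context.Context) (requestInfo, bool) {
	ri, ok := ctx.Value(requestInfoKey{}).(requestInfo)
	return ri, ok
}

func main() {
	ctx := newRequestInfoContext(context.Background(), requestInfo{Method: "/svc/Method"})
	if ri, ok := requestInfoFromContext(ctx); ok {
		fmt.Println(ri.Method) // /svc/Method
	}
}
```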
func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 { return nil } + return SPIFFEIDFromCert(state.PeerCertificates[0]) +} + +// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE +// ID format is invalid, return nil with warning. +func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL { + if cert == nil || cert.URIs == nil { + return nil + } var spiffeID *url.URL - for _, uri := range state.PeerCertificates[0].URIs { + for _, uri := range cert.URIs { if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") { continue } // From this point, we assume the uri is intended for a SPIFFE ID. if len(uri.String()) > 2048 { - grpclog.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes") + logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes") return nil } - if len(uri.Host) == 0 || len(uri.RawPath) == 0 || len(uri.Path) == 0 { - grpclog.Warning("invalid SPIFFE ID: domain or workload ID is empty") + if len(uri.Host) == 0 || len(uri.Path) == 0 { + logger.Warning("invalid SPIFFE ID: domain or workload ID is empty") return nil } if len(uri.Host) > 255 { - grpclog.Warning("invalid SPIFFE ID: domain length larger than 255 characters") + logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters") return nil } // A valid SPIFFE certificate can only have exactly one URI SAN field. - if len(state.PeerCertificates[0].URIs) > 1 { - grpclog.Warning("invalid SPIFFE ID: multiple URI SANs") + if len(cert.URIs) > 1 { + logger.Warning("invalid SPIFFE ID: multiple URI SANs") return nil } spiffeID = uri diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go similarity index 94% rename from prometheus-to-sd/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go rename to prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go index 2f4472bec..2919632d6 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. @@ -18,8 +16,7 @@ * */ -// Package internal contains credentials-internal code. -package internal +package credentials import ( "net" diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/util.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/util.go new file mode 100644 index 000000000..f792fd22c --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
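Editor's note: SPIFFEIDFromCert accepts a URI SAN only if it has the spiffe scheme, no opaque part or user info, a non-empty trust domain and workload path, at most 2048 bytes total and 255 bytes of host, and is the certificate's only URI SAN. The per-URI checks, isolated from the certificate plumbing:

```go
package main

import (
	"fmt"
	"net/url"
)

// validSPIFFEID applies the same shape checks the credentials code
// performs on a certificate's URI SAN.
func validSPIFFEID(u *url.URL) bool {
	if u == nil || u.Scheme != "spiffe" || u.Opaque != "" ||
		(u.User != nil && u.User.Username() != "") {
		return false
	}
	if len(u.String()) > 2048 || len(u.Host) == 0 || len(u.Host) > 255 || len(u.Path) == 0 {
		return false
	}
	return true
}

func main() {
	good, _ := url.Parse("spiffe://example.org/workload/api")
	bad, _ := url.Parse("https://example.org/workload/api")
	fmt.Println(validSPIFFEID(good), validSPIFFEID(bad)) // true false
}
```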
+ * + */ + +package credentials + +import ( + "crypto/tls" +) + +const alpnProtoStrH2 = "h2" + +// AppendH2ToNextProtos appends h2 to next protos. +func AppendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// CloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. +func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 73931a94b..80fd5c7d2 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -21,18 +21,46 @@ package envconfig import ( "os" + "strconv" "strings" ) -const ( - prefix = "GRPC_GO_" - retryStr = prefix + "RETRY" - txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" -) - var ( - // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". - Retry = strings.EqualFold(os.Getenv(retryStr), "on") // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). - TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") + TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // AdvertiseCompressors is set if registered compressor should be advertised + // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). + AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 + // entries but may be overridden by setting the environment variable + // "GRPC_RING_HASH_CAP". This does not override the default bounds + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). + RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // PickFirstLBConfig is set if we should support configuration of the + // pick_first LB policy, which can be enabled by setting the environment + // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". + PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) ) + +func boolFromEnv(envVar string, def bool) bool { + if def { + // The default is true; return true unless the variable is "false". + return !strings.EqualFold(os.Getenv(envVar), "false") + } + // The default is false; return false unless the variable is "true". + return strings.EqualFold(os.Getenv(envVar), "true") +} + +func uint64FromEnv(envVar string, def, min, max uint64) uint64 { + v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) + if err != nil { + return def + } + if v < min { + return min + } + if v > max { + return max + } + return v +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/envconfig/observability.go new file mode 100644 index 000000000..dd314cfb1 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2022 gRPC authors. 
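Editor's note: boolFromEnv and uint64FromEnv, added here, encode the envconfig conventions — a default of true means only an explicit, case-insensitive "false" disables a feature, and numeric overrides are clamped into [min, max]. A standalone demonstration (the variable names are made up):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

func boolFromEnv(envVar string, def bool) bool {
	if def {
		// Default true: anything except an explicit "false" keeps it on.
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	// Default false: only an explicit "true" turns it on.
	return strings.EqualFold(os.Getenv(envVar), "true")
}

func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
	v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
	if err != nil {
		return def // unset or unparseable: fall back to the default
	}
	if v < min {
		return min
	}
	if v > max {
		return max
	}
	return v
}

func main() {
	os.Setenv("DEMO_FLAG", "FALSE") // comparison is case-insensitive
	fmt.Println(boolFromEnv("DEMO_FLAG", true)) // false

	os.Setenv("DEMO_CAP", "99999999")
	fmt.Println(uint64FromEnv("DEMO_CAP", 4096, 1, 8*1024*1024)) // 8388608 (clamped)
}
```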
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/envconfig/xds.go new file mode 100644 index 000000000..02b4b6a1c --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import ( + "os" +) + +const ( + // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. + // Do not use this and read from env directly. Its value is read and kept in + // variable XDSBootstrapFileName. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file + // content. Do not use this and read from env directly. Its value is read + // and kept in variable XDSBootstrapFileContent. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" +) + +var ( + // XDSBootstrapFileName holds the name of the file which contains xDS + // bootstrap configuration. Users can specify the location of the bootstrap + // file by setting the environment variable "GRPC_XDS_BOOTSTRAP". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv) + // XDSBootstrapFileContent holds the content of the xDS bootstrap + // configuration. 
Users can specify the bootstrap config by setting the + // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) + // XDSRingHash indicates whether ring hash support is enabled, which can be + // disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". + XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) + // XDSClientSideSecurity is used to control processing of security + // configuration on the client-side. + // + // Note that there is no env var protection for the server-side because we + // have a brand new API on the server-side and users explicitly need to use + // the new API to get security integration on the server. + XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) + // XDSAggregateAndDNS indicates whether processing of aggregated cluster and + // DNS cluster is enabled, which can be disabled by setting the environment + // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + // to "false". + XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) + + // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, + // which can be disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". + XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) + // XDSOutlierDetection indicates whether outlier detection support is + // enabled, which can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". + XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) + // XDSFederation indicates whether federation support is enabled, which can + // be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". + XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) + + // XDSRLS indicates whether processing of Cluster Specifier plugins and + // support for the RLS CLuster Specifier is enabled, which can be disabled by + // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to + // "false". + XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) + + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which + // can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". + XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) +) diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index 745a166f0..b68e26a36 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -110,14 +110,17 @@ type LoggerV2 interface { // This is a copy of the DepthLoggerV2 defined in the external grpclog package. // It is defined here to avoid a circular dependency. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
type DepthLoggerV2 interface { - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 82af70e96..02224b42c 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -63,6 +63,9 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { // Debugf does info logging at verbose level 2. func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. if !Logger.V(2) { return } @@ -73,6 +76,15 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { return } InfoDepth(1, fmt.Sprintf(format, args...)) + +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (pl *PrefixLogger) V(l int) bool { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + return Logger.V(l) } // NewPrefixLogger creates a prefix logger with the given prefix. diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 200b115ca..d08e3e907 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -31,26 +31,58 @@ var ( mu sync.Mutex ) +// Int implements rand.Int on the grpcrand global source. +func Int() int { + mu.Lock() + defer mu.Unlock() + return r.Int() +} + // Int63n implements rand.Int63n on the grpcrand global source. func Int63n(n int64) int64 { mu.Lock() - res := r.Int63n(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Int63n(n) } // Intn implements rand.Intn on the grpcrand global source. func Intn(n int) int { mu.Lock() - res := r.Intn(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Intn(n) +} + +// Int31n implements rand.Int31n on the grpcrand global source. 
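Editor's note: the depth parameter on DepthLoggerV2 exists so wrappers such as the channelz logging helpers can attribute a record to their caller's file and line rather than their own. The standard library exposes the same knob through log.Logger.Output's calldepth; a minimal depth-aware logger sketch, which is not grpc's implementation:

```go
package main

import (
	"fmt"
	"log"
	"os"
)

type depthLogger struct{ l *log.Logger }

// InfoDepth logs args in the manner of fmt.Println, attributing the
// record to the InfoDepth call site when depth is 0, or `depth`
// frames further up the stack.
func (d depthLogger) InfoDepth(depth int, args ...interface{}) {
	// calldepth 2 is the caller of InfoDepth; each extra unit of
	// depth walks one more frame up.
	d.l.Output(2+depth, fmt.Sprintln(args...))
}

func helper(d depthLogger) {
	// depth 1: report helper's caller (main) as the log site.
	d.InfoDepth(1, "hello from a wrapper")
}

func main() {
	d := depthLogger{l: log.New(os.Stderr, "INFO ", log.Lshortfile)}
	helper(d) // prints main.go:NN for the helper(d) line, not a line inside helper
}
```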
+func Int31n(n int32) int32 { + mu.Lock() + defer mu.Unlock() + return r.Int31n(n) } // Float64 implements rand.Float64 on the grpcrand global source. func Float64() float64 { mu.Lock() - res := r.Float64() - mu.Unlock() - return res + defer mu.Unlock() + return r.Float64() +} + +// Uint64 implements rand.Uint64 on the grpcrand global source. +func Uint64() uint64 { + mu.Lock() + defer mu.Unlock() + return r.Uint64() +} + +// Uint32 implements rand.Uint32 on the grpcrand global source. +func Uint32() uint32 { + mu.Lock() + defer mu.Unlock() + return r.Uint32() +} + +// Shuffle implements rand.Shuffle on the grpcrand global source. +var Shuffle = func(n int, f func(int, int)) { + mu.Lock() + defer mu.Unlock() + r.Shuffle(n, f) } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go new file mode 100644 index 000000000..37b8d4117 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/buffer" +) + +// CallbackSerializer provides a mechanism to schedule callbacks in a +// synchronized manner. It provides a FIFO guarantee on the order of execution +// of scheduled callbacks. New callbacks can be scheduled by invoking the +// Schedule() method. +// +// This type is safe for concurrent access. +type CallbackSerializer struct { + // Done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. + Done chan struct{} + + callbacks *buffer.Unbounded + closedMu sync.Mutex + closed bool +} + +// NewCallbackSerializer returns a new CallbackSerializer instance. The provided +// context will be passed to the scheduled callbacks. Users should cancel the +// provided context to shutdown the CallbackSerializer. It is guaranteed that no +// callbacks will be added once this context is canceled, and any pending un-run +// callbacks will be executed before the serializer is shut down. +func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { + t := &CallbackSerializer{ + Done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } + go t.run(ctx) + return t +} + +// Schedule adds a callback to be scheduled after existing callbacks are run. +// +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. +// +// Return value indicates if the callback was successfully added to the list of +// callbacks to be executed by the serializer. It is not possible to add +// callbacks once the context passed to NewCallbackSerializer is cancelled. 
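Editor's note: grpcrand guards a private *rand.Rand with a package mutex because sources created by rand.New are not safe for concurrent use, unlike math/rand's top-level functions. All the new helpers follow the same lock/defer-unlock shape; reduced to a self-contained example:

```go
package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

var (
	r  = rand.New(rand.NewSource(time.Now().UnixNano()))
	mu sync.Mutex
)

// Intn mirrors grpcrand.Intn: every access to the shared *rand.Rand
// takes the mutex, since rand.New sources are not goroutine-safe.
func Intn(n int) int {
	mu.Lock()
	defer mu.Unlock()
	return r.Intn(n)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(Intn(100)) // safe to call from multiple goroutines
		}()
	}
	wg.Wait()
}
```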
+func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + t.closedMu.Lock() + defer t.closedMu.Unlock() + + if t.closed { + return false + } + t.callbacks.Put(f) + return true +} + +func (t *CallbackSerializer) run(ctx context.Context) { + var backlog []func(context.Context) + + defer close(t.Done) + for ctx.Err() == nil { + select { + case <-ctx.Done(): + // Do nothing here. Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. + case callback, ok := <-t.callbacks.Get(): + if !ok { + return + } + t.callbacks.Load() + callback.(func(ctx context.Context))(ctx) + } + } + + // Fetch pending callbacks if any, and execute them before returning from + // this method and closing t.Done. + t.closedMu.Lock() + t.closed = true + backlog = t.fetchPendingCallbacks() + t.callbacks.Close() + t.closedMu.Unlock() + for _, b := range backlog { + b(ctx) + } +} + +func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { + var backlog []func(context.Context) + for { + select { + case b := <-t.callbacks.Get(): + backlog = append(backlog, b.(func(context.Context))) + t.callbacks.Load() + default: + return backlog + } + } +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/gobefore110.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go similarity index 67% rename from prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/gobefore110.go rename to prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go index 743713e19..6635f7bca 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/credentials/gobefore110.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go @@ -1,8 +1,6 @@ -// +build !go1.10 - /* * - * Copyright 2020 gRPC authors. + * Copyright 2022 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,14 +16,17 @@ * */ -package credentials +package grpcsync import ( - "crypto/tls" - "net/url" + "sync" ) -//TODO(ZhenLian): delete this file when we remove Go 1.9 tests. -func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { - return nil +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) + } } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 000000000..9f4090967 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. +func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. +func RegisteredCompressors() string { + if !envconfig.AdvertiseCompressors { + return "" + } + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go similarity index 72% rename from prometheus-to-sd/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go rename to prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go index d4346e9ea..e2f948e8f 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go @@ -1,8 +1,6 @@ -// +build appengine - /* * - * Copyright 2018 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,13 +16,5 @@ * */ -package internal - -import ( - "net" -) - -// WrapSyscallConn returns newConn on appengine. -func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { - return newConn -} +// Package grpcutil provides utility functions used across the gRPC codebase. +package grpcutil diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/method.go index 4e7475060..ec62b4775 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -25,7 +25,6 @@ import ( // ParseMethod splits service and method from the input. It expects format // "/service/method". -// func ParseMethod(methodName string) (service, method string, _ error) { if !strings.HasPrefix(methodName, "/") { return "", "", errors.New("invalid method name: should start with /") @@ -39,6 +38,11 @@ func ParseMethod(methodName string) (service, method string, _ error) { return methodName[:pos], methodName[pos+1:], nil } +// baseContentType is the base content-type for gRPC. This is a valid +// content-type on it's own, but can also include a content-subtype such as +// "proto" as a suffix after "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +// for more details. const baseContentType = "application/grpc" // ContentSubtype returns the content-subtype for the given content-type. 
The diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/regex.go similarity index 63% rename from prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go rename to prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/regex.go index 8783a8cf8..7a092b2b8 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -1,8 +1,6 @@ -// +build go1.13 - /* * - * Copyright 2019 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,16 +16,16 @@ * */ -package dns +package grpcutil -import "net" +import "regexp" -func init() { - filterError = func(err error) error { - if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { - // The name does not exist; not an error. - return nil - } - return err +// FullMatchWithRegex returns whether the full text matches the regex provided. +func FullMatchWithRegex(re *regexp.Regexp, text string) bool { + if len(text) == 0 { + return re.MatchString(text) } + re.Longest() + rem := re.FindString(text) + return len(rem) == len(text) } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/target.go deleted file mode 100644 index 80b33cdaf..000000000 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/grpcutil/target.go +++ /dev/null @@ -1,55 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcutil provides a bunch of utility functions to be used across the -// gRPC codebase. -package grpcutil - -import ( - "strings" - - "google.golang.org/grpc/resolver" -) - -// split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. -func split2(s, sep string) (string, string, bool) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", "", false - } - return spl[0], spl[1], true -} - -// ParseTarget splits target into a resolver.Target struct containing scheme, -// authority and endpoint. -// -// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: -// target}. 
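// FullMatchWithRegex above anchors a match to the whole input by switching
// the engine to leftmost-longest mode and comparing the match length with
// the input length. A hedged behavioral sketch (illustrative only):
//
//	re := regexp.MustCompile("ab*")
//	grpcutil.FullMatchWithRegex(re, "abbb") // true: longest match covers the input
//	grpcutil.FullMatchWithRegex(re, "abc")  // false: trailing "c" is unmatched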
-func ParseTarget(target string) (ret resolver.Target) { - var ok bool - ret.Scheme, ret.Endpoint, ok = split2(target, "://") - if !ok { - return resolver.Target{Endpoint: target} - } - ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") - if !ok { - return resolver.Target{Endpoint: target} - } - return ret -} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/internal.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/internal.go index 818ca8579..42ff39c84 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/internal.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/internal.go @@ -38,20 +38,132 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // NewRequestInfoContext creates a new context based on the argument context attaching - // the passed in RequestInfo to the new context. - NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context - // NewClientHandshakeInfoContext returns a copy of the input context with - // the passed in ClientHandshakeInfo struct added to it. - NewClientHandshakeInfoContext interface{} // func(context.Context, credentials.ClientHandshakeInfo) context.Context - // ParseServiceConfigForTesting is for creating a fake - // ClientConn for resolver testing only - ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and - // parsing. Both a and b should be returned by ParseServiceConfigForTesting. + // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the // there's difference in white space. EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool + // GetCertificateProviderBuilder returns the registered builder for the + // given name. This is set by package certprovider for use from xDS + // bootstrap code while parsing certificate provider configs in the + // bootstrap file. + GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo + // stored in the passed in attributes. This is set by + // credentials/xds/xds.go. + GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + // GetServerCredentials returns the transport credentials configured on a + // gRPC server. An xDS-enabled server needs to know what type of credentials + // is configured on the underlying gRPC server. This is set by server.go. + GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + // CanonicalString returns the canonical string of the code defined here: + // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + CanonicalString interface{} // func (codes.Code) string + // DrainServerTransports initiates a graceful close of existing connections + // on a gRPC server accepted on the provided listener address. 
An + // xDS-enabled server invokes this method on a grpc.Server when a particular + // listener moves to "not-serving" mode. + DrainServerTransports interface{} // func(*grpc.Server, string) + // AddGlobalServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalServerOptions interface{} // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalDialOptions interface{} // func(opt ...DialOption) + // DisableGlobalDialOptions returns a DialOption that prevents the + // ClientConn from applying the global DialOptions (set via + // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + DisableGlobalDialOptions interface{} // func() grpc.DialOption + // ClearGlobalDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. + JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption + // JoinServerOptions combines the server options passed as arguments into a + // single server option. + JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. 
+ RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. + UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. + // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). + // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() + + // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. + ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -74,3 +186,9 @@ const ( // that supports backend returned by grpclb balancer. CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/metadata/metadata.go new file mode 100644 index 000000000..c82e608e0 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata contains functions to set and get metadata from addresses. +// +// This package is experimental. +package metadata + +import ( + "fmt" + "strings" + + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +type mdKeyType string + +const mdKey = mdKeyType("grpc.internal.address.metadata") + +type mdValue metadata.MD + +func (m mdValue) Equal(o interface{}) bool { + om, ok := o.(mdValue) + if !ok { + return false + } + if len(m) != len(om) { + return false + } + for k, v := range m { + ov := om[k] + if len(ov) != len(v) { + return false + } + for i, ve := range v { + if ov[i] != ve { + return false + } + } + } + return true +} + +// Get returns the metadata of addr. 
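//
// A hedged round-trip sketch (caller code; the import aliases are assumed):
//
//	// imd "google.golang.org/grpc/internal/metadata"
//	// gmd "google.golang.org/grpc/metadata"
//	addr := resolver.Address{Addr: "10.0.0.1:443"}
//	addr = imd.Set(addr, gmd.MD{"x-app": []string{"v1"}})
//	_ = imd.Get(addr) // MD{"x-app": ["v1"]}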
+func Get(addr resolver.Address) metadata.MD { + attrs := addr.Attributes + if attrs == nil { + return nil + } + md, _ := attrs.Value(mdKey).(mdValue) + return metadata.MD(md) +} + +// Set sets (overrides) the metadata in addr. +// +// When a SubConn is created with this address, the RPCs sent on it will all +// have this metadata. +func Set(addr resolver.Address, md metadata.MD) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) + return addr +} + +// Validate validates every pair in md with ValidatePair. +func Validate(md metadata.MD) error { + for k, vals := range md { + if err := ValidatePair(k, vals...); err != nil { + return err + } + } + return nil +} + +// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // for i that saving a conversion if not using for range + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} + +// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) : +// +// - key must contain one or more characters. +// - the characters in the key must be contained in [0-9 a-z _ - .]. +// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. +// - the characters in the every value must be printable (in [%x20-%x7E]). +func ValidatePair(key string, vals ...string) error { + // key should not be empty + if key == "" { + return fmt.Errorf("there is an empty key in the header") + } + // pseudo-header will be ignored + if key[0] == ':' { + return nil + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(key); i++ { + r := key[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) + } + } + if strings.HasSuffix(key, "-bin") { + return nil + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key) + } + } + return nil +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/pretty/pretty.go new file mode 100644 index 000000000..0177af4b5 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. +package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/golang/protobuf/jsonpb" + protov1 "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protov2 "google.golang.org/protobuf/proto" +) + +const jsonIndent = " " + +// ToJSON marshals the input into a json string. 
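//
// A hedged sketch of typical use in log statements (msg is an assumed proto
// message variable; illustrative only):
//
//	logger.Infof("received update: %s", pretty.ToJSON(msg))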
+// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e interface{}) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} + ret, err := mm.MarshalToString(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return ret + case protov2.Message: + mm := protojson.MarshalOptions{ + Multiline: true, + Indent: jsonIndent, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return string(ret) + default: + ret, err := json.MarshalIndent(ee, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", ee) + } + return string(ret) + } +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/config_selector.go new file mode 100644 index 000000000..c7a18a948 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver provides internal resolver-related functionality. +package resolver + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +// ConfigSelector controls what configuration to use for every RPC. +type ConfigSelector interface { + // Selects the configuration for the RPC, or terminates it using the error. + // This error will be converted by the gRPC library to a status error with + // code UNKNOWN if it is not returned as a status error. + SelectConfig(RPCInfo) (*RPCConfig, error) +} + +// RPCInfo contains RPC information needed by a ConfigSelector. +type RPCInfo struct { + // Context is the user's context for the RPC and contains headers and + // application timeout. It is passed for interception purposes and for + // efficiency reasons. SelectConfig should not be blocking. + Context context.Context + Method string // i.e. "/Service/Method" +} + +// RPCConfig describes the configuration to use for each RPC. +type RPCConfig struct { + // The context to use for the remainder of the RPC; can pass info to LB + // policy or affect timeout or metadata. 
+ Context context.Context + MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC + OnCommitted func() // Called when the RPC has been committed (retries no longer possible) + Interceptor ClientInterceptor +} + +// ClientStream is the same as grpc.ClientStream, but defined here for circular +// dependency reasons. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// ClientInterceptor is an interceptor for gRPC client streams. +type ClientInterceptor interface { + // NewStream produces a ClientStream for an RPC which may optionally use + // the provided function to produce a stream for delegation. Note: + // RPCInfo.Context should not be used (will be nil). + // + // done is invoked when the RPC is finished using its connection, or could + // not be assigned a connection. RPC operations may still occur on + // ClientStream after done is called, since the interceptor is invoked by + // application-layer operations. done must never be nil when called. + NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) +} + +// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side. 
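//
// A hedged sketch of a trivial ConfigSelector that applies one fixed method
// config to every RPC (assumes only the surrounding types; iresolver is an
// assumed alias for this package):
//
//	type fixedConfigSelector struct{ mc serviceconfig.MethodConfig }
//
//	func (s *fixedConfigSelector) SelectConfig(ri iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
//		return &iresolver.RPCConfig{Context: ri.Context, MethodConfig: s.mc}, nil
//	}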
+type ServerInterceptor interface { + // AllowRPC checks if an incoming RPC is allowed to proceed based on + // information about connection RPC was received on, and HTTP Headers. This + // information will be piped into context. + AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. +} + +type csKeyType string + +const csKey = csKeyType("grpc.internal.resolver.configSelector") + +// SetConfigSelector sets the config selector in state and returns the new +// state. +func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { + state.Attributes = state.Attributes.WithValue(csKey, cs) + return state +} + +// GetConfigSelector retrieves the config selector from state, if present, and +// returns it or nil if absent. +func GetConfigSelector(state resolver.State) ConfigSelector { + cs, _ := state.Attributes.Value(csKey).(ConfigSelector) + return cs +} + +// SafeConfigSelector allows for safe switching of ConfigSelector +// implementations such that previous values are guaranteed to not be in use +// when UpdateConfigSelector returns. +type SafeConfigSelector struct { + mu sync.RWMutex + cs ConfigSelector +} + +// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until +// all uses of the previous ConfigSelector have completed. +func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) { + scs.mu.Lock() + defer scs.mu.Unlock() + scs.cs = cs +} + +// SelectConfig defers to the current ConfigSelector in scs. +func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) { + scs.mu.RLock() + defer scs.mu.RUnlock() + return scs.cs.SelectConfig(r) +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 304235566..09a667f33 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -34,6 +34,7 @@ import ( grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" @@ -46,6 +47,13 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") +// Globals to stub out in tests. TODO: Perhaps these two can be combined into a +// single variable for testing the resolver? +var ( + newTimer = time.NewTimer + newTimerDNSResRate = time.NewTimer +) + func init() { resolver.Register(NewBuilder()) } @@ -108,7 +116,7 @@ type dnsBuilder struct{} // Build creates and starts a DNS resolver that watches the name resolution of the target. 
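// The newTimer/newTimerDNSResRate globals above exist so tests can inject
// fake timers. A hedged sketch of a test-side override (illustrative only;
// the helper name is assumed):
//
//	func overrideNewTimer(d time.Duration) (restore func()) {
//		old := newTimer
//		newTimer = func(time.Duration) *time.Timer { return time.NewTimer(d) }
//		return func() { newTimer = old }
//	}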
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - host, port, err := parseTarget(target.Endpoint, defaultPort) + host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { return nil, err } @@ -132,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.Authority == "" { + if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.Authority) + d.resolver, err = customAuthorityResolver(target.URL.Host) if err != nil { return nil, err } @@ -143,7 +151,6 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts d.wg.Add(1) go d.watcher() - d.ResolveNow(resolver.ResolveNowOptions{}) return d, nil } @@ -201,28 +208,38 @@ func (d *dnsResolver) Close() { func (d *dnsResolver) watcher() { defer d.wg.Done() + backoffIndex := 1 for { - select { - case <-d.ctx.Done(): - return - case <-d.rn: - } - state, err := d.lookup() if err != nil { + // Report error to the underlying grpc.ClientConn. d.cc.ReportError(err) } else { - d.cc.UpdateState(*state) + err = d.cc.UpdateState(*state) } - // Sleep to prevent excessive re-resolutions. Incoming resolution requests - // will be queued in d.rn. - t := time.NewTimer(minDNSResRate) + var timer *time.Timer + if err == nil { + // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least + // to prevent constantly re-resolving. + backoffIndex = 1 + timer = newTimerDNSResRate(minDNSResRate) + select { + case <-d.ctx.Done(): + timer.Stop() + return + case <-d.rn: + } + } else { + // Poll on an error found in DNS Resolver or an error received from ClientConn. + timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + backoffIndex++ + } select { - case <-t.C: case <-d.ctx.Done(): - t.Stop() + timer.Stop() return + case <-timer.C: } } } @@ -260,18 +277,13 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { return newAddrs, nil } -var filterError = func(err error) error { +func handleDNSError(err error, lookupType string) error { if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). 
return nil } - return err -} - -func handleDNSError(err error, lookupType string) error { - err = filterError(err) if err != nil { err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) logger.Info(err) @@ -306,12 +318,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { } func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { - var newAddrs []resolver.Address addrs, err := d.resolver.LookupHost(d.ctx, d.host) if err != nil { err = handleDNSError(err, "A") return nil, err } + newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { ip, ok := formatIP(a) if !ok { diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index 520d9229e..afac56572 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -20,13 +20,20 @@ // name without scheme back to gRPC as resolved address. package passthrough -import "google.golang.org/grpc/resolver" +import ( + "errors" + + "google.golang.org/grpc/resolver" +) const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint() == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } r := &passthroughResolver{ target: target, cc: cc, @@ -45,7 +52,7 @@ type passthroughResolver struct { } func (r *passthroughResolver) start() { - r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go new file mode 100644 index 000000000..160911687 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package unix implements a resolver for unix targets. 
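//
// A hedged sketch of the dial-target forms this resolver handles (per the
// gRPC naming scheme; caller side, illustrative only):
//
//	grpc.Dial("unix:///absolute/path/to.sock", opts...) // filesystem socket
//	grpc.Dial("unix-abstract:abstract-name", opts...)   // Linux abstract namespace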
+package unix + +import ( + "fmt" + + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/resolver" +) + +const unixScheme = "unix" +const unixAbstractScheme = "unix-abstract" + +type builder struct { + scheme string +} + +func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) + } + + // gRPC was parsing the dial target manually before PR #4817, and we + // switched to using url.Parse() in that PR. To avoid breaking existing + // resolver implementations we ended up stripping the leading "/" from the + // endpoint. This obviously does not work for the "unix" scheme. Hence we + // end up using the parsed URL instead. + endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + addr := resolver.Address{Addr: endpoint} + if b.scheme == unixAbstractScheme { + // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do + // not want trailing \0 in address. + addr.Addr = "@" + addr.Addr + } + cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) + return &nopResolver{}, nil +} + +func (b *builder) Scheme() string { + return b.scheme +} + +type nopResolver struct { +} + +func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (*nopResolver) Close() {} + +func init() { + resolver.Register(&builder{scheme: unixScheme}) + resolver.Register(&builder{scheme: unixAbstractScheme}) +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go new file mode 100644 index 000000000..11d82afcc --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. +func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. 
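	// A hedged trace of the trimming below (illustrative):
	//   1500*time.Millisecond -> "1.500000000" -> "1.500" -> `"1.500s"`
	//   2*time.Second         -> "2.000000000" -> "2"     -> `"2s"`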
+ str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. + const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. + const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go index 9b26414d4..51e733e49 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -22,14 +22,21 @@ package serviceconfig import ( "encoding/json" "fmt" + "time" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" externalserviceconfig "google.golang.org/grpc/serviceconfig" ) -// BalancerConfig is the balancer config part that service config's -// loadBalancingConfig fields can be unmarshalled to. It's a json unmarshaller. +var logger = grpclog.Component("core") + +// BalancerConfig wraps the name and config associated with one load balancing +// policy. It corresponds to a single entry of the loadBalancingConfig field +// from ServiceConfig. +// +// It implements the json.Unmarshaler interface. 
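//
// A hedged sketch of the JSON shape involved, with one entry per candidate
// policy, tried in order (policy names are illustrative):
//
//	[{"round_robin": {}}, {"pick_first": {}}]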
// // https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 type BalancerConfig struct { @@ -39,7 +46,31 @@ type BalancerConfig struct { type intermediateBalancerConfig []map[string]json.RawMessage -// UnmarshalJSON implements json unmarshaller. +// MarshalJSON implements the json.Marshaler interface. +// +// It marshals the balancer and config into a length-1 slice +// ([]map[string]config). +func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { + if bc.Config == nil { + // If config is nil, return empty config `{}`. + return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil + } + c, err := json.Marshal(bc.Config) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// +// ServiceConfig contains a list of loadBalancingConfigs, each with a name and +// config. This method iterates through that list in order, and stops at the +// first policy that is supported. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { var ir intermediateBalancerConfig err := json.Unmarshal(b, &ir) @@ -47,17 +78,22 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { return err } + var names []string for i, lbcfg := range ir { if len(lbcfg) != 1 { return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) } + var ( name string jsonCfg json.RawMessage ) - // Get the key:value pair from the map. + // Get the key:value pair from the map. We have already made sure that + // the map contains a single entry. for name, jsonCfg = range lbcfg { } + + names = append(names, name) builder := balancer.Get(name) if builder == nil { // If the balancer is not registered, move on to the next config. @@ -69,7 +105,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { parser, ok := builder.(balancer.ConfigParser) if !ok { if string(jsonCfg) != "{}" { - grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) } // Stop at this, though the builder doesn't support parsing config. return nil @@ -86,5 +122,59 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { // return. This means we had a loadBalancingConfig slice but did not // encounter a registered policy. The config is considered invalid in this // case. - return fmt.Errorf("invalid loadBalancingConfig: no supported policies found") + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) +} + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. 
If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. + MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int + // RetryPolicy configures retry options for the method. + RetryPolicy *RetryPolicy +} + +// RetryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type RetryPolicy struct { + // MaxAttempts is the maximum number of attempts, including the original RPC. + // + // This field is required and must be two or greater. + MaxAttempts int + + // Exponential backoff parameters. The initial retry attempt will occur at + // random(0, initialBackoff). In general, the nth attempt will occur at + // random(0, + // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). + // + // These fields are required and must be greater than zero. + InitialBackoff time.Duration + MaxBackoff time.Duration + BackoffMultiplier float64 + + // The set of status codes which may be retried. + // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. + RetryableStatusCodes map[codes.Code]bool } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/status/status.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/status/status.go index 710223b8d..b0ead4f54 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/status/status.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/status/status.go @@ -97,7 +97,7 @@ func (s *Status) Err() error { if s.Code() == codes.OK { return nil } - return &Error{e: s.Proto()} + return &Error{s: s} } // WithDetails returns a new status with the provided details messages appended to the status. @@ -136,19 +136,23 @@ func (s *Status) Details() []interface{} { return details } +func (s *Status) String() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) +} + // Error wraps a pointer of a status proto. It implements error and Status, // and a nil *Error should never be returned by this package. type Error struct { - e *spb.Status + s *Status } func (e *Error) Error() string { - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage()) + return e.s.String() } // GRPCStatus returns the Status represented by se. func (e *Error) GRPCStatus() *Status { - return FromProto(e.e) + return e.s } // Is implements future error.Is functionality. 
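// A hedged sketch of the service-config JSON that feeds the RetryPolicy
// struct defined in serviceconfig.go above (field values are illustrative;
// shape per gRFC A6):
//
//	"retryPolicy": {
//	  "maxAttempts": 3,
//	  "initialBackoff": "0.1s",
//	  "maxBackoff": "1s",
//	  "backoffMultiplier": 2,
//	  "retryableStatusCodes": ["UNAVAILABLE"]
//	}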
@@ -158,5 +162,15 @@ func (e *Error) Is(target error) bool { if !ok { return false } - return proto.Equal(e.e, tse.e) + return proto.Equal(e.s.s, tse.s.s) +} + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. +func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go index c50468a0f..b3a72276d 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. @@ -43,26 +41,24 @@ func GetCPUTime() int64 { return ts.Nano() } -// Rusage is an alias for syscall.Rusage under linux non-appengine environment. -type Rusage syscall.Rusage +// Rusage is an alias for syscall.Rusage under linux environment. +type Rusage = syscall.Rusage // GetRusage returns the resource usage of current process. -func GetRusage() (rusage *Rusage) { - rusage = new(Rusage) - syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage)) - return +func GetRusage() *Rusage { + rusage := new(Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, rusage) + return rusage } // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { - f := (*syscall.Rusage)(first) - l := (*syscall.Rusage)(latest) var ( - utimeDiffs = l.Utime.Sec - f.Utime.Sec - utimeDiffus = l.Utime.Usec - f.Utime.Usec - stimeDiffs = l.Stime.Sec - f.Stime.Sec - stimeDiffus = l.Stime.Usec - f.Stime.Usec + utimeDiffs = latest.Utime.Sec - first.Utime.Sec + utimeDiffus = latest.Utime.Usec - first.Utime.Usec + stimeDiffs = latest.Stime.Sec - first.Stime.Sec + stimeDiffus = latest.Stime.Usec - first.Stime.Usec ) uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index adae60d65..999f52cd7 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * @@ -35,41 +36,41 @@ var logger = grpclog.Component("core") func log() { once.Do(func() { - logger.Info("CPU time info is unavailable on non-linux or appengine environment.") + logger.Info("CPU time info is unavailable on non-linux environments.") }) } -// GetCPUTime returns the how much CPU time has passed since the start of this process. -// It always returns 0 under non-linux or appengine environment. +// GetCPUTime returns the how much CPU time has passed since the start of this +// process. It always returns 0 under non-linux environments. func GetCPUTime() int64 { log() return 0 } -// Rusage is an empty struct under non-linux or appengine environment. +// Rusage is an empty struct under non-linux environments. 
type Rusage struct{} -// GetRusage is a no-op function under non-linux or appengine environment. -func GetRusage() (rusage *Rusage) { +// GetRusage is a no-op function under non-linux environments. +func GetRusage() *Rusage { log() return nil } // CPUTimeDiff returns the differences of user CPU time and system CPU time used -// between two Rusage structs. It a no-op function for non-linux or appengine environment. +// between two Rusage structs. It a no-op function for non-linux environments. func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { log() return 0, 0 } -// SetTCPUserTimeout is a no-op function under non-linux or appengine environments +// SetTCPUserTimeout is a no-op function under non-linux environments. func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { log() return nil } -// GetTCPUserTimeout is a no-op function under non-linux or appengine environments -// a negative return value indicates the operation is not supported +// GetTCPUserTimeout is a no-op function under non-linux environments. +// A negative return value indicates the operation is not supported func GetTCPUserTimeout(conn net.Conn) (int, error) { log() return -1, nil diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 40ef23923..be5a9c81e 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -20,13 +20,19 @@ package transport import ( "bytes" + "errors" "fmt" + "net" "runtime" + "strconv" "sync" "sync/atomic" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/status" ) var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { @@ -128,6 +134,16 @@ type cleanupStream struct { func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM +type earlyAbortStream struct { + httpStatus uint32 + streamID uint32 + contentSubtype string + status *status.Status + rst bool +} + +func (*earlyAbortStream) isTransportResponseFrame() bool { return false } + type dataFrame struct { streamID uint32 endStream bool @@ -177,7 +193,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn bool + closeConn error // if set, loopyWriter will exit, resulting in conn closure } func (*goAway) isTransportResponseFrame() bool { return false } @@ -195,6 +211,14 @@ type outFlowControlSizeRequest struct { func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -284,7 +308,7 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. 
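	// trfChan stores the chan struct{} value directly rather than a *chan:
	// atomic.Value only requires that successive stores use one concrete
	// type, and a typed-nil chan struct{} is stored to clear the throttle.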
transportResponseFrames int - trfChan atomic.Value // *chan struct{} + trfChan atomic.Value // chan struct{} } func newControlBuffer(done <-chan struct{}) *controlBuffer { @@ -298,10 +322,10 @@ func newControlBuffer(done <-chan struct{}) *controlBuffer { // throttle blocks if there are too many incomingSettings/cleanupStreams in the // controlbuf. func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(*chan struct{}) + ch, _ := c.trfChan.Load().(chan struct{}) if ch != nil { select { - case <-*ch: + case <-ch: case <-c.done: } } @@ -335,8 +359,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - ch := make(chan struct{}) - c.trfChan.Store(&ch) + c.trfChan.Store(make(chan struct{})) } } c.mu.Unlock() @@ -377,9 +400,9 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are removing the frame that put us over the // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(*chan struct{}) - close(*ch) - c.trfChan.Store((*chan struct{})(nil)) + ch := c.trfChan.Load().(chan struct{}) + close(ch) + c.trfChan.Store((chan struct{})(nil)) } c.transportResponseFrames-- } @@ -395,8 +418,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - c.finish() - return nil, ErrConnClosing + return nil, errors.New("transport closed by client") } } } @@ -420,6 +442,14 @@ func (c *controlBuffer) finish() { hdr.onOrphaned(ErrConnClosing) } } + // In case throttle() is currently in flight, it needs to be unblocked. + // Otherwise, the transport may not close, since the transport is closed by + // the reader encountering the connection error. + ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + close(ch) + } + c.trfChan.Store((chan struct{})(nil)) c.mu.Unlock() } @@ -458,12 +488,14 @@ type loopyWriter struct { hEnc *hpack.Encoder // HPACK encoder. bdpEst *bdpEstimator draining bool + conn net.Conn + logger *grpclog.PrefixLogger // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -476,6 +508,8 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato hBuf: &buf, hEnc: hpack.NewEncoder(&buf), bdpEst: bdpEst, + conn: conn, + logger: logger, } return l } @@ -493,23 +527,26 @@ const minBatchSize = 1000 // 2. Stream level flow control quota available. // // In each iteration of run loop, other than processing the incoming control -// frame, loopy calls processData, which processes one node from the activeStreams linked-list. -// This results in writing of HTTP2 frames into an underlying write buffer. -// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. -// As an optimization, to increase the batch size for each flush, loopy yields the processor, once -// if the batch size is too low to give stream goroutines a chance to fill it up. +// frame, loopy calls processData, which processes one node from the +// activeStreams linked-list. 
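// [Editor's aside — not part of the patch] The trfChan change above stores a
// chan struct{} directly in the atomic.Value instead of a *chan struct{}:
// channels are already reference types, so the extra indirection bought
// nothing. Runnable sketch of the throttle pattern; note atomic.Value requires
// every Store to use the same concrete type, hence the typed nil:
//
//	package main
//
//	import (
//		"fmt"
//		"sync/atomic"
//	)
//
//	func main() {
//		var trfChan atomic.Value
//		trfChan.Store(make(chan struct{})) // begin throttling
//		if ch, _ := trfChan.Load().(chan struct{}); ch != nil {
//			close(ch) // unblocks every goroutine parked in throttle()
//		}
//		trfChan.Store((chan struct{})(nil)) // end throttling, keep the stored type stable
//		fmt.Println(trfChan.Load().(chan struct{}) == nil) // true
//	}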
This results in writing of HTTP2 frames into an +// underlying write buffer. When there are no more control frames to read from +// controlBuf, loopy flushes the write buffer. As an optimization, to increase +// the batch size for each flush, loopy yields the processor once, if the batch +// size is too low, to give stream goroutines a chance to fill it up. +// +// Upon exiting, if the error causing the exit is not an I/O error, run() +// flushes and closes the underlying connection. Otherwise, the connection is +// left open to allow the I/O error to be encountered by the reader instead. func (l *loopyWriter) run() (err error) { defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. When the connection is closed by some other known issue. - // 2. User closed the connection. - // 3. A graceful close of connection. - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter.run returning. %v", err) - } - err = nil + if l.logger.V(logLevel) { + l.logger.Infof("loopyWriter exiting with error: %v", err) } + if !isIOError(err) { + l.framer.writer.Flush() + l.conn.Close() + } + l.cbuf.finish() }() for { it, err := l.cbuf.get(true) @@ -554,7 +591,6 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata - } } } @@ -563,11 +599,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) } -func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { // Otherwise update the quota. if w.streamID == 0 { l.sendQuota += w.increment - return nil + return } // Find the stream and update it. if str, ok := l.estdStreams[w.streamID]; ok { @@ -575,10 +611,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { str.state = active l.activeStreams.enqueue(str) - return nil + return } } - return nil } func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { @@ -586,13 +621,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { } func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { - if err := l.applySettings(s.ss); err != nil { - return err - } + l.applySettings(s.ss) return l.framer.fr.WriteSettingsAck() } -func (l *loopyWriter) registerStreamHandler(h *registerStream) error { +func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ id: h.streamID, state: empty, @@ -600,15 +633,14 @@ wq: h.wq, } l.estdStreams[h.streamID] = str - return nil } func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + if l.logger.V(logLevel) { + l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID) } return nil } @@ -635,19 +667,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { itl: &itemList{}, wq: h.wq, } - str.itl.enqueue(h) - return l.originateStream(str) + return l.originateStream(str, h) } -func (l *loopyWriter) originateStream(str *outStream) error { - hdr := str.itl.dequeue().(*headerFrame) - if err := hdr.initStream(str.id); err != nil { - if err ==
ErrConnClosing { - return err - } - // Other errors(errStreamDrain) need not close transport. +func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { + // l.draining is set when handling GoAway, in which case we want to avoid + // creating new streams. + if l.draining { + // TODO: provide a better error with the reason we are in draining. + hdr.onOrphaned(errStreamDrain) return nil } + if err := hdr.initStream(str.id); err != nil { + return err + } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err } @@ -662,8 +695,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { - if logger.V(logLevel) { - logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + if l.logger.V(logLevel) { + l.logger.Warningf("Encountered error while encoding headers: %v", err) } } } @@ -701,10 +734,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He return nil } -func (l *loopyWriter) preprocessData(df *dataFrame) error { +func (l *loopyWriter) preprocessData(df *dataFrame) { str, ok := l.estdStreams[df.streamID] if !ok { - return nil + return } // If we got data for a stream it means that // stream was originated and the headers were sent out. @@ -713,7 +746,6 @@ str.state = active l.activeStreams.enqueue(str) } - return nil } func (l *loopyWriter) pingHandler(p *ping) error { @@ -724,9 +756,8 @@ func (l *loopyWriter) pingHandler(p *ping) error { } -func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { o.resp <- l.sendQuota - return nil } func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { @@ -743,8 +774,35 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { return err } } - if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing + if l.draining && len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. + return errors.New("finished processing active streams while in draining mode") + } + return nil +} + +func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if l.side == clientSide { + return errors.New("earlyAbortStream not handled on client") + } + // In case the caller forgets to set the http status, default to 200. + if eas.httpStatus == 0 { + eas.httpStatus = 200 + } + headerFields := []hpack.HeaderField{ + {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, + {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, + {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, + {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, + } + + if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { + return err + } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } } return nil } @@ -753,7 +811,8 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - return ErrConnClosing + // Flush and close the connection; we are done with it.
+ return errors.New("received GOAWAY with no active streams") } } return nil } @@ -774,7 +833,7 @@ func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: - return l.incomingWindowUpdateHandler(i) + l.incomingWindowUpdateHandler(i) case *outgoingWindowUpdate: return l.outgoingWindowUpdateHandler(i) case *incomingSettings: @@ -784,25 +843,32 @@ func (l *loopyWriter) handle(i interface{}) error { case *headerFrame: return l.headerHandler(i) case *registerStream: - return l.registerStreamHandler(i) + l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) + case *earlyAbortStream: + return l.earlyAbortStreamHandler(i) case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: - return l.preprocessData(i) + l.preprocessData(i) case *ping: return l.pingHandler(i) case *goAway: return l.goAwayHandler(i) case *outFlowControlSizeRequest: - return l.outFlowControlSizeRequestHandler(i) + l.outFlowControlSizeRequestHandler(i) + case closeConnection: + // Just return a non-I/O error and run() will flush and close the + // connection. + return ErrConnClosing default: return fmt.Errorf("transport: unknown control message type %T", i) } + return nil } -func (l *loopyWriter) applySettings(ss []http2.Setting) error { +func (l *loopyWriter) applySettings(ss []http2.Setting) { for _, s := range ss { switch s.ID { case http2.SettingInitialWindowSize: @@ -821,7 +887,6 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) { updateHeaderTblSize(l.hEnc, s.Val) } } - return nil } // processData removes the first stream from active streams, writes out at most 16KB @@ -838,9 +903,9 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item in this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. + // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. // As an optimization to keep wire traffic low, data from d is copied to h to make it as big as the - // maximum possilbe HTTP2 frame size. + // maximum possible HTTP2 frame size. if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame // Client sends out empty data frame with endStream = true @@ -855,7 +920,7 @@ func (l *loopyWriter) processData() (bool, error) { return false, err } if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { - return false, nil + return false, err } } else { l.activeStreams.enqueue(str) diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/defaults.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/defaults.go index 9fa306b2e..bc8ee0747 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/defaults.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -47,3 +47,9 @@ const ( defaultClientMaxHeaderListSize = uint32(16 << 20) defaultServerMaxHeaderListSize = uint32(16 << 20) ) + +// MaxStreamID is the upper bound for the stream ID before the current +// transport gracefully closes and a new transport is created for subsequent RPCs. +// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit +// integer. It's exported so that tests can override it.
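// [Editor's aside — not part of the patch] Client-initiated HTTP/2 stream IDs
// are odd, 31-bit, and strictly increasing, so the ID space tops out at
// 2^31-1; draining at 75% of that leaves headroom to finish in-flight streams
// before IDs run out. A quick check of the arithmetic:
//
//	package main
//
//	import (
//		"fmt"
//		"math"
//	)
//
//	func main() {
//		max := uint32(math.MaxInt32 * 3 / 4)
//		fmt.Println(max)           // 1610612735
//		fmt.Println(max%2 == 1)    // true: still a valid (odd) client stream ID
//		fmt.Println(math.MaxInt32) // 2147483647 = 2^31-1
//	}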
+var MaxStreamID = uint32(math.MaxInt32 * 3 / 4) diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index f262edd8e..97198c515 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -136,12 +136,10 @@ type inFlow struct { // newLimit updates the inflow window to a new value n. // It assumes that n is always greater than the old limit. -func (f *inFlow) newLimit(n uint32) uint32 { +func (f *inFlow) newLimit(n uint32) { f.mu.Lock() - d := n - f.limit f.limit = n f.mu.Unlock() - return d } func (f *inFlow) maybeAdjust(n uint32) uint32 { diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 05d3871e6..98f80e3fa 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -39,6 +39,7 @@ import ( "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -46,24 +47,32 @@ import ( "google.golang.org/grpc/status" ) -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? 
we did before contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - return nil, errors.New("invalid gRPC request content-type") + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusUnsupportedMediaType) + return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) } st := &serverHandlerTransport{ @@ -75,11 +84,14 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta contentSubtype: contentSubtype, stats: stats, } + st.logger = prefixLoggerForServerHandlerTransport(st) if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + msg := fmt.Sprintf("malformed grpc-timeout: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } st.timeoutSet = true st.timeout = to @@ -97,7 +109,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } metakv = append(metakv, k, v) } @@ -138,16 +152,19 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats stats.Handler + stats []stats.Handler + logger *grpclog.PrefixLogger } -func (ht *serverHandlerTransport) Close() error { - ht.closeOnce.Do(ht.closeCloseChanOnce) - return nil +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if ht.logger.V(logLevel) { + ht.logger.Infof("Closing: %v", err) + } + close(ht.closedCh) + }) } -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } // strAddr is a net.Addr backed by either a TCP "ip:port" string, or @@ -229,15 +246,15 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) if err == nil { // transport has not been closed - if ht.stats != nil { - // Note: The trailer fields are compressed with hpack after this call returns. - // No WireLength field is set here. - ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } } - ht.Close() + ht.Close(errors.New("finished writing status")) return err } @@ -315,10 +332,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { }) if err == nil { - if ht.stats != nil { + for _, sh := range ht.stats { // Note: The header fields are compressed with hpack after this call returns. // No WireLength field is set here. 
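// [Editor's aside — not part of the patch] Throughout this patch the single
// stats.Handler field becomes a []stats.Handler slice, and each
// "if handler != nil { ... }" site becomes a loop; no nil check is needed
// because ranging over a nil slice is a no-op. Sketch with stand-in types
// (handler and printHandler are illustrative names, not grpc's):
//
//	package main
//
//	import "fmt"
//
//	type handler interface{ HandleRPC(event string) }
//
//	type printHandler struct{ name string }
//
//	func (p printHandler) HandleRPC(event string) { fmt.Println(p.name, event) }
//
//	func main() {
//		var handlers []handler // nil: the loop below simply does nothing
//		handlers = append(handlers, printHandler{"h1"}, printHandler{"h2"})
//		for _, sh := range handlers {
//			sh.HandleRPC("OutHeader")
//		}
//	}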
- ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + sh.HandleRPC(s.Context(), &stats.OutHeader{ Header: md.Copy(), Compression: s.sendCompress, }) @@ -347,7 +364,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace case <-ht.req.Context().Done(): } cancel() - ht.Close() + ht.Close(errors.New("request is done processing")) }() req := ht.req @@ -370,14 +387,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) - if ht.stats != nil { - s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range ht.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: ht.RemoteAddr(), Compression: s.recvCompress, } - ht.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, @@ -436,17 +453,17 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain() { +func (ht *serverHandlerTransport) Drain(debugData string) { panic("Drain() is not implemented") } // mapRecvMsgError maps the non-nil err into the appropriate // error value as expected by callers of *grpc.parser.recvMsg. // In particular, it can only be: -// * io.EOF -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package func mapRecvMsgError(err error) error { if err == io.EOF || err == io.ErrUnexpectedEOF { return err diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/http2_client.go index e7f232113..326bf0848 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -24,6 +24,8 @@ import ( "io" "math" "net" + "net/http" + "path/filepath" "strconv" "strings" "sync" @@ -32,13 +34,17 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/internal/grpcutil" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -54,12 +60,16 @@ var clientConnectionCounter uint64 // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string - md interface{} + lastRead int64 // Keep this field 64-bit aligned.
Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + // address contains the resolver-returned address for this transport. + // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream` when determining the :authority header. + address resolver.Address + md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter remoteAddr net.Addr @@ -75,6 +85,7 @@ type http2Client struct { framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. + // Do not access controlBuf with mu held. controlBuf *controlBuffer fc *trInFlow // The scheme used: https if TLS is on, http otherwise. @@ -87,7 +98,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandler stats.Handler + statsHandlers []stats.Handler initialWindowSize int32 @@ -95,17 +106,15 @@ type http2Client struct { maxSendHeaderListSize *uint32 bdpEst *bdpEstimator - // onPrefaceReceipt is a callback that client transport calls upon - // receiving server preface to signal that a succefull HTTP2 - // connection was established. - onPrefaceReceipt func() maxConcurrentStreams uint32 streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 nextID uint32 + registeredCompressors string + // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables state transportState activeStreams map[uint32]*Stream @@ -114,6 +123,9 @@ type http2Client struct { // goAwayReason records the http2.ErrCode and debug data received with the // GoAway frame. goAwayReason GoAwayReason + // goAwayDebugMessage contains a detailed human-readable string about a + // GoAway frame, useful for error messages. + goAwayDebugMessage string // A condition variable used to signal when the keepalive goroutine should // go dormant. The condition for dormancy is based on the number of active // streams and the `PermitWithoutStream` keepalive client parameter. And @@ -126,22 +138,45 @@ type http2Client struct { kpDormant bool // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData - onGoAway func(GoAwayReason) - onClose func() + onClose func(GoAwayReason) bufferPool *bufferPool connectionID uint64 + logger *grpclog.PrefixLogger } -func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { + address := addr.Addr + networkType, ok := networktype.Get(addr) if fn != nil { - return fn(ctx, addr) + // Special handling for unix scheme with custom dialer. Back in the day, + // we did not have a unix resolver and therefore targets with a unix + // scheme would end up using the passthrough resolver. So, users used a + // custom dialer in this case and expected the original dial target to + // be passed to the custom dialer. Now, we have a unix resolver. But if + // a custom dialer is specified, we want to retain the old behavior in + // terms of the address being passed to the custom dialer. + if networkType == "unix" && !strings.HasPrefix(address, "\x00") { + // Supported unix targets are either "unix://absolute-path" or + // "unix:relative-path".
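// [Editor's aside — not part of the patch] The branch that follows restores
// the historical address forms seen by custom dialers; its mapping, pulled
// out as a runnable one-liner (unixDialTarget is a hypothetical helper name):
//
//	package main
//
//	import (
//		"fmt"
//		"path/filepath"
//	)
//
//	func unixDialTarget(address string) string {
//		if filepath.IsAbs(address) {
//			return "unix://" + address
//		}
//		return "unix:" + address
//	}
//
//	func main() {
//		fmt.Println(unixDialTarget("/tmp/grpc.sock")) // unix:///tmp/grpc.sock
//		fmt.Println(unixDialTarget("tmp/grpc.sock"))  // unix:tmp/grpc.sock
//	}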
+ if filepath.IsAbs(address) { + return fn(ctx, "unix://"+address) + } + return fn(ctx, "unix:"+address) + } + return fn(ctx, address) } - return (&net.Dialer{}).DialContext(ctx, "tcp", addr) + if !ok { + networkType, address = parseDialTarget(address) + } + if networkType == "tcp" && useProxy { + return proxyDial(ctx, address, grpcUA) + } + return (&net.Dialer{}).DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -163,7 +198,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -172,19 +207,51 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }() - conn, err := dial(connectCtx, opts.Dialer, addr.Addr) + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the dialer and credential handshaker. This makes it possible for + // address specific arbitrary data to reach custom dialers and credential handshakers. + connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) if err != nil { if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } - return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) } + // Any further errors will close the underlying connection defer func(conn net.Conn) { if err != nil { conn.Close() } }(conn) + + // The following defer and goroutine monitor the connectCtx for cancelation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. + <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. + if err := connectCtx.Err(); err != nil { + // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("Aborting due to connect deadline expiring: %v", err) + } + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams // Validate keepalive parameters. 
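// [Editor's aside — not part of the patch] The connectCtx monitor above is a
// general pattern: a helper goroutine hard-closes the conn if the context
// expires first, and the deferred cancel-and-wait guarantees the goroutine
// can never touch the conn after the constructor returns. Minimal sketch
// (dialGuarded is a hypothetical name):
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//		"net"
//		"time"
//	)
//
//	func dialGuarded(connectCtx context.Context, addr string) (net.Conn, error) {
//		conn, err := (&net.Dialer{}).DialContext(connectCtx, "tcp", addr)
//		if err != nil {
//			return nil, err
//		}
//		monitorDone := make(chan struct{})
//		monitorCtx, stopMonitor := context.WithCancel(connectCtx)
//		defer func() {
//			stopMonitor() // awaken the monitor if connectCtx hasn't expired
//			<-monitorDone // wait for it, so it cannot outlive this function
//		}()
//		go func() {
//			defer close(monitorDone)
//			<-monitorCtx.Done()
//			if connectCtx.Err() != nil {
//				conn.Close() // deadline hit mid-handshake: fail pending I/O fast
//			}
//		}()
//		// ... handshake on conn; reads/writes fail promptly if conn was closed ...
//		return conn, nil
//	}
//
//	func main() {
//		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
//		defer cancel()
//		if conn, err := dialGuarded(ctx, "example.com:80"); err != nil {
//			fmt.Println("dial failed:", err)
//		} else {
//			fmt.Println("connected:", conn.RemoteAddr())
//			conn.Close()
//		}
//	}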
if kp.Time == 0 { @@ -216,16 +283,22 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } } if transportCreds != nil { - // gRPC, resolver, balancer etc. can specify arbitrary data in the - // Attributes field of resolver.Address, which is shoved into connectCtx - // and passed to the credential handshaker. This makes it possible for - // address specific arbitrary data to reach the credential handshaker. - contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) - connectCtx = contextWithHandshakeInfo(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } + for _, cd := range perRPCCreds { + if cd.RequireTransportSecurity() { + if ci, ok := authInfo.(interface { + GetCommonAuthInfo() credentials.CommonAuthInfo + }); ok { + secLevel := ci.GetCommonAuthInfo().SecurityLevel + if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity { + return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection") + } + } + } + } isSecure = true if transportCreds.Info().SecurityProtocol == "tls" { scheme = "https" @@ -248,7 +321,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ctxDone: ctx.Done(), // Cache Done chan. cancel: cancel, userAgent: opts.UserAgent, - md: addr.Metadata, + registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -263,18 +337,25 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandler: opts.StatsHandler, + statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, - onPrefaceReceipt: onPrefaceReceipt, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), - onGoAway: onGoAway, - onClose: onClose, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), + onClose: onClose, + } + t.logger = prefixLoggerForClientTransport(t) + // Add peer information to the http2client context. 
+ t.ctx = peer.NewContext(t.ctx, t.getPeer()) + + if md, ok := addr.Metadata.(*metadata.MD); ok { + t.md = *md + } else if md := imetadata.Get(addr); md != nil { + t.md = md } t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { @@ -287,37 +368,51 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts updateFlowControl: t.updateFlowControl, } } - if t.statsHandler != nil { - t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } - // Start the reader goroutine for incoming message. Each transport has - // a dedicated goroutine which reads HTTP2 frame from network. Then it - // dispatches the frame to the corresponding stream entity. - go t.reader() + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err == nil { + err = <-readerErrCh + } + if err != nil { + t.Close(err) + } + }() // Send connection preface to server. n, err := t.conn.Write(clientPreface) if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + return nil, err } if n != len(clientPreface) { - t.Close() - return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + return nil, err } var ss []http2.Setting @@ -335,14 +430,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } err = t.framer.fr.WriteSettings(ss...) if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + return nil, err } // Adjust the connection flow control window if needed. 
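// [Editor's aside — not part of the patch] The readerErrCh handshake above
// replaces the old onPrefaceReceipt callback: the reader goroutine either
// pushes one error (preface failed) or closes the channel (preface OK), and
// the deferred check makes the constructor block until the outcome is known.
// Shape of the pattern (setup is a hypothetical name):
//
//	package main
//
//	import (
//		"errors"
//		"fmt"
//	)
//
//	func setup() (err error) {
//		readerErrCh := make(chan error, 1) // buffered: the reader never blocks sending
//		go func() {
//			// reader: on a bad preface send the error; on success close(readerErrCh)
//			readerErrCh <- errors.New("server preface: connection reset")
//		}()
//		defer func() {
//			if err == nil {
//				err = <-readerErrCh // nil if the channel was closed on success
//			}
//			if err != nil {
//				// tear down the transport here
//			}
//		}()
//		// ... write client preface, settings, window update; each step may set err ...
//		return nil
//	}
//
//	func main() { fmt.Println(setup()) }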
if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) + return nil, err } } @@ -352,18 +447,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) - err := t.loopy.run() - if err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } - } - // If it's a connection error, let reader goroutine handle it - // since there might be data in the buffers. - if _, ok := err.(net.Error); !ok { - t.conn.Close() - } + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy.run() close(t.writerDone) }() return t, nil @@ -379,6 +464,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { buf: newRecvBuffer(), headerChan: make(chan struct{}), contentSubtype: callHdr.ContentSubtype, + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -408,7 +494,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ Addr: t.remoteAddr, - AuthInfo: t.authInfo, + AuthInfo: t.authInfo, // Can be nil } } @@ -418,7 +504,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) Method: callHdr.Method, AuthInfo: t.authInfo, } - ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) + ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) if err != nil { return nil, err @@ -444,9 +530,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } + registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. + if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { + if registeredCompressors != "" { + registeredCompressors += "," + } + registeredCompressors += callHdr.SendCompress + } + } + + if registeredCompressors != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. @@ -481,25 +580,23 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) for _, vv := range added { for i, v := range vv { if i%2 == 0 { - k = v + k = strings.ToLower(v) continue } // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. 
if isReservedHeader(k) { continue } - headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } - if md, ok := t.md.(*metadata.MD); ok { - for k, vv := range *md { - if isReservedHeader(k) { - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } + for k, vv := range t.md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } return headerFields, nil @@ -528,11 +625,15 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s for _, c := range t.perRPCCreds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } return nil, err } - return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err) } for k, v := range data { // Capital header names are illegal in HTTP/2. @@ -549,12 +650,22 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // Note: if these credentials are provided both via dial options and call // options, then both sets of credentials will be applied. if callCreds := callHdr.Creds; callCreds != nil { - if !t.isSecure && callCreds.RequireTransportSecurity() { - return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + if callCreds.RequireTransportSecurity() { + ri, _ := credentials.RequestInfoFromContext(ctx) + if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { - return nil, status.Errorf(codes.Internal, "transport: %v", err) + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) } callAuthData = make(map[string]string, len(data)) for k, v := range data { @@ -566,26 +677,46 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } -// PerformedIOError wraps an error to indicate IO may have been performed -// before the error occurred. -type PerformedIOError struct { +// NewStreamError wraps an error and reports additional information. Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire. However, there are two notable exceptions: +// +// 1. If the stream headers violate the max header list size allowed by the +// server. 
It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. +type NewStreamError struct { Err error + + AllowTransparentRetry bool } -// Error implements error. -func (p PerformedIOError) Error() string { - return p.Err.Error() +func (e NewStreamError) Error() string { + return e.Err.Error() } // NewStream creates a stream and registers it into the transport as "active" -// streams. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +// streams. All non-nil errors returned will be *NewStreamError. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - // We may have performed I/O in the per-RPC creds callback, so do not - // allow transparent retry. - return nil, PerformedIOError{err} + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -607,17 +738,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea endStream: false, initStream: func(id uint32) error { t.mu.Lock() - if state := t.state; state != reachable { + // TODO: handle transport closure in loopy instead and remove this + // initStream is never called when transport is draining. + if t.state == closing { t.mu.Unlock() - // Do a quick cleanup. - err := error(errStreamDrain) - if state == closing { - err = ErrConnClosing - } - cleanup(err) - return err + cleanup(ErrConnClosing) + return ErrConnClosing } - t.activeStreams[id] = s if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) @@ -634,6 +761,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } firstTry := true var ch chan struct{} + transportDrainRequired := false checkForStreamQuota := func(it interface{}) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { @@ -649,8 +777,20 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea h := it.(*headerFrame) h.streamID = t.nextID t.nextID += 2 + + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. + transportDrainRequired = t.nextID > MaxStreamID + s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + t.mu.Lock() + if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). + t.mu.Unlock() + return false // Don't create a stream if the transport is already closed. 
+ } + t.activeStreams[s.id] = s + t.mu.Unlock() if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: @@ -676,52 +816,56 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } for { success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { - if !checkForStreamQuota(it) { - return false - } - if !checkForHeaderListSize(it) { - return false - } - return true + return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { - return nil, err + // Connection closed. + return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} } if success { break } if hdrListSizeErr != nil { - return nil, hdrListSizeErr + return nil, &NewStreamError{Err: hdrListSizeErr} } firstTry = false select { case <-ch: - case <-s.ctx.Done(): - return nil, ContextErr(s.ctx.Err()) + case <-ctx.Done(): + return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, errStreamDrain + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} case <-t.ctx.Done(): - return nil, ErrConnClosing + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if t.statsHandler != nil { + if len(t.statsHandlers) != 0 { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + // Note: Creating a new stats object to prevent pollution. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) + } + } + if transportDrainRequired { + if t.logger.V(logLevel) { + t.logger.Infof("Draining transport: t.nextID > MaxStreamID") } - t.statsHandler.HandleRPC(s.ctx, outHeader) + t.GracefulClose() } return s, nil } @@ -796,25 +940,29 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) // This will unblock write. close(s.done) + if s.doneFunc != nil { + s.doneFunc() + } } // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. -// -// This method blocks until the addrConn that initiated this transport is -// re-connected. This happens because t.onClose() begins reconnect logic at the -// addrConn level and blocks until the addrConn is successfully connected. -func (t *http2Client) Close() error { +func (t *http2Client) Close(err error) { t.mu.Lock() - // Make sure we only Close once. + // Make sure we only close once. if t.state == closing { t.mu.Unlock() - return nil + return + } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. 
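// [Editor's aside — not part of the patch] NewStream above now reports
// failures as *NewStreamError with an AllowTransparentRetry flag, replacing
// the old PerformedIOError. Callers inspect it roughly like this (the local
// NewStreamError type below is a stand-in mirroring the patch; grpc's real
// one lives in the internal transport package):
//
//	package main
//
//	import (
//		"errors"
//		"fmt"
//	)
//
//	type NewStreamError struct {
//		Err                   error
//		AllowTransparentRetry bool
//	}
//
//	func (e *NewStreamError) Error() string { return e.Err.Error() }
//
//	func main() {
//		err := error(&NewStreamError{Err: errors.New("connection closed"), AllowTransparentRetry: true})
//		var nse *NewStreamError
//		if errors.As(err, &nse) && nse.AllowTransparentRetry {
//			fmt.Println("nothing reached the wire; safe to retry:", nse)
//		}
//	}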
+ if t.state != draining { + t.onClose(GoAwayInvalid) } - // Call t.onClose before setting the state to closing to prevent the client - // from attempting to create new streams ASAP. - t.onClose() t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -826,21 +974,30 @@ func (t *http2Client) Close() error { t.mu.Unlock() t.controlBuf.finish() t.cancel() - err := t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) + t.conn.Close() + channelz.RemoveEntry(t.channelzID) + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status + if len(goAwayDebugMessage) > 0 { + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) } + // Notify all active streams. for _, s := range streams { - t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { connEnd := &stats.ConnEnd{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } - return err } // GracefulClose sets the state to draining, which prevents new streams from @@ -855,11 +1012,15 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } + if t.logger.V(logLevel) { + t.logger.Infof("GracefulClose called") + } + t.onClose(GoAwayInvalid) t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close() + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) return } t.controlBuf.put(&incomingGoAway{}) @@ -919,13 +1080,13 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.mu.Unlock() updateIWS := func(interface{}) bool { t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() return true } t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) @@ -1000,7 +1161,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) { } // The server has closed the stream without sending trailers. Record that // the read direction is closed, and set the status appropriately. 
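// [Editor's aside — not part of the patch] Close above now folds any
// previously received GOAWAY debug data into the Unavailable status handed to
// active streams; that debug string is often the only clue to why a server
// dropped the connection. Runnable sketch against the public status/codes
// packages:
//
//	package main
//
//	import (
//		"errors"
//		"fmt"
//
//		"google.golang.org/grpc/codes"
//		"google.golang.org/grpc/status"
//	)
//
//	func main() {
//		err := errors.New("connection error: EOF")
//		goAwayDebugMessage := `code: ENHANCE_YOUR_CALM, debug data: "too_many_pings"`
//		var st *status.Status
//		if len(goAwayDebugMessage) > 0 {
//			st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
//		} else {
//			st = status.New(codes.Unavailable, err.Error())
//		}
//		fmt.Println(st.Code(), st.Message())
//	}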
- if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) } } @@ -1016,8 +1177,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + if t.logger.V(logLevel) { + t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) } statusCode = codes.Unknown } @@ -1099,15 +1260,17 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.mu.Unlock() return } - if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - if logger.V(logLevel) { - logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") - } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { + // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug + // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is + // enabled by default and double the configured KEEPALIVE_TIME used for new connections + // on that channel. + logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".") } id := f.LastStreamID - if id > 0 && id%2 != 1 { + if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see @@ -1125,18 +1288,20 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. if id > t.prevGoAwayID { t.mu.Unlock() - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) return } default: t.setGoAwayReason(f) close(t.goAway) - t.controlBuf.put(&incomingGoAway{}) + defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held. // Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. - t.onGoAway(t.goAwayReason) - t.state = draining + if t.state != draining { + t.onClose(t.goAwayReason) + t.state = draining + } } // All streams with IDs greater than the GoAwayId // and smaller than the previous GoAway ID should be killed. @@ -1144,24 +1309,35 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { if upperLimit == 0 { // This is the first GoAway Frame. upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. } + + t.prevGoAwayID = id + if len(t.activeStreams) == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + return + } + + streamsToClose := make([]*Stream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server.
- atomic.StoreUint32(&stream.unprocessed, 1) - t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + if streamID > id && streamID <= upperLimit { + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) + } } } - t.prevGoAwayID = id - active := len(t.activeStreams) t.mu.Unlock() - if active == 0 { - t.Close() + // Called outside t.mu because closeStream can take controlBuf's mu, which + // could induce deadlock and is not allowed. + for _, stream := range streamsToClose { + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } } // setGoAwayReason sets the value of t.goAwayReason based // on the GoAway frame received. -// It expects a lock on transport's mutext to be held by +// It expects a lock on transport's mutex to be held by // the caller. func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason @@ -1171,12 +1347,17 @@ func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayTooManyPings } } + if len(f.DebugData()) == 0 { + t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode) + } else { + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData())) + } } -func (t *http2Client) GetGoAwayReason() GoAwayReason { +func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { t.mu.Lock() defer t.mu.Unlock() - return t.goAwayReason + return t.goAwayReason, t.goAwayDebugMessage } func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { @@ -1203,35 +1384,128 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - state := &decodeState{} - // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. - state.data.isGRPC = !initialHeader - if err := state.decodeHeader(frame); err != nil { - t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream) + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + se := status.New(codes.Internal, "peer header list size exceeded limit") + t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) return } - isHeader := false - defer func() { - if t.statsHandler != nil { - if isHeader { - inHeader := &stats.InHeader{ - Client: true, - WireLength: int(frame.Header().Length), - Header: s.header.Copy(), - Compression: s.recvCompress, - } - t.statsHandler.HandleRPC(s.ctx, inHeader) - } else { - inTrailer := &stats.InTrailer{ - Client: true, - WireLength: int(frame.Header().Length), - Trailer: s.trailer.Copy(), - } - t.statsHandler.HandleRPC(s.ctx, inTrailer) + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. 
+ isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string + statusGen *status.Status + recvCompress string + httpStatusCode *int + httpStatusErr string + rawStatusCode = codes.Unknown + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + if initialHeader { + httpStatusErr = "malformed header: missing HTTP status" + } + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) + break + } + contentTypeErr = "" + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + recvCompress = hf.Value + case "grpc-status": + code, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) + case "grpc-status-details-bin": + var err error + statusGen, err = decodeGRPCStatusDetails(hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) + } + case ":status": + if hf.Value == "200" { + httpStatusErr = "" + statusCode := 200 + httpStatusCode = &statusCode + break + } + + c, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + statusCode := int(c) + httpStatusCode = &statusCode + + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break } + mdata[hf.Name] = append(mdata[hf.Name], v) } - }() + } + + if !isGRPC || httpStatusErr != "" { + var code = codes.Internal // when header does not include HTTP status, return INTERNAL + + if httpStatusCode != nil { + var ok bool + code, ok = HTTPStatusConvTab[*httpStatusCode] + if !ok { + code = codes.Unknown + } + } + var errs []string + if httpStatusErr != "" { + errs = append(errs, httpStatusErr) + } + if contentTypeErr != "" { + errs = append(errs, contentTypeErr) + } + // Verify the HTTP response is a 200. 
+ se := status.New(code, strings.Join(errs, "; ")) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + isHeader := false // If headerChan hasn't been closed yet if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { @@ -1242,9 +1516,9 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. - s.recvCompress = state.data.encoding - if len(state.data.mdata) > 0 { - s.header = state.data.mdata + s.recvCompress = recvCompress + if len(mdata) > 0 { + s.header = mdata } } else { // HEADERS frame block carries a Trailers-Only. @@ -1253,40 +1527,67 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { close(s.headerChan) } + for _, sh := range t.statsHandlers { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + Compression: s.recvCompress, + } + sh.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: metadata.MD(mdata).Copy(), + } + sh.HandleRPC(s.ctx, inTrailer) + } + } + if !endStream { return } + if statusGen == nil { + statusGen = status.New(rawStatusCode, grpcMessage) + } + // if client received END_STREAM from server while stream was still active, send RST_STREAM rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } -// reader runs as a separate goroutine in charge of reading data from network -// connection. -// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - defer close(t.readerDone) - // Check the validity of server preface. +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { frame, err := t.framer.fr.ReadFrame() if err != nil { - t.Close() // this kicks off resetTransport, so must be last before return - return - } - t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) - if t.keepaliveEnabled { - atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + return connectionErrorf(true, err, "error reading server preface: %v", err) } sf, ok := frame.(*http2.SettingsFrame) if !ok { - t.Close() // this kicks off resetTransport, so must be last before return - return + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) } - t.onPrefaceReceipt() t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. 
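+//
+// Illustrative sketch (not upstream code; connectCtx and the surrounding
+// constructor shape are assumptions) of how a caller can gate on the preface
+// result reported via errCh:
+//
+//	errCh := make(chan error, 1)
+//	go t.reader(errCh)
+//	select {
+//	case err, ok := <-errCh:
+//		if ok {
+//			return nil, err // reading the server preface failed
+//		}
+//		// errCh closed: preface read successfully, transport is usable
+//	case <-connectCtx.Done():
+//		return nil, connectCtx.Err()
+//	}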
+func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } // loop to keep reading incoming messages on this transport. for { @@ -1306,13 +1607,19 @@ func (t *http2Client) reader() { if s != nil { // use error detail to provide better err message code := http2ErrConvTab[se.Code] - msg := t.framer.fr.ErrorDetail().Error() + errorDetail := t.framer.fr.ErrorDetail() + var msg string + if errorDetail != nil { + msg = errorDetail.Error() + } else { + msg = "received invalid frame" + } t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue } else { // Transport error. - t.Close() + t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) return } } @@ -1346,7 +1653,7 @@ func minTime(a, b time.Duration) time.Duration { return b } -// keepalive running in a separate goroutune makes sure the connection is alive by sending pings. +// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. @@ -1371,7 +1678,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close() + t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) return } t.mu.Lock() @@ -1483,3 +1790,9 @@ func (t *http2Client) getOutFlowWindow() int64 { return -2 } } + +func (t *http2Client) stateForTesting() transportState { + t.mu.Lock() + defer t.mu.Unlock() + return t.state +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 04cbedf79..ec4eef213 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -26,6 +26,7 @@ import ( "io" "math" "net" + "net/http" "strconv" "sync" "sync/atomic" @@ -34,12 +35,16 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -51,10 +56,10 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. 
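+// Illustrative note (not upstream code): the "header list size" compared
+// against this limit follows RFC 7541 §4.1, where each field costs
+// len(name)+len(value)+32 octets. hpack.HeaderField.Size() computes exactly
+// that, so checkForHeaderListSize later in this file effectively sums:
+//
+//	var size int64
+//	for _, f := range fields {
+//		size += int64(len(f.Name)) + int64(len(f.Value)) + 32
+//	}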
- ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen @@ -72,7 +77,6 @@ type http2Server struct { writerDone chan struct{} // sync point to enable testing. remoteAddr net.Addr localAddr net.Addr - maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection inTapHandle tap.ServerInHandle framer *framer @@ -82,7 +86,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats stats.Handler + stats []stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. @@ -101,13 +105,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when drain(...) is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. @@ -117,16 +121,44 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData bufferPool *bufferPool connectionID uint64 + + // maxStreamMu guards the maximum stream ID + // This lock may not be taken if mu is already held. + maxStreamMu sync.Mutex + maxStreamID uint32 // max stream ID ever seen + + logger *grpclog.PrefixLogger } -// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is -// returned if something goes wrong. -func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { +// NewServerTransport creates a http2 transport with conn and configuration +// options from config. +// +// It returns a non-nil transport and a nil error on success. On failure, it +// returns a nil transport and a non-nil error. For a special case where the +// underlying conn gets closed before the client preface could be read, it +// returns a nil transport and a nil error. 
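+//
+// Illustrative caller sketch (not upstream code; handleStream and traceCtx are
+// assumed names, and grpc's Server wires this up internally):
+//
+//	st, err := NewServerTransport(rawConn, config)
+//	switch {
+//	case err == credentials.ErrConnDispatched:
+//		// The conn was taken over by another handler; leave it open.
+//	case err == io.EOF:
+//		// Conn closed before the handshake or preface completed; suppress logs.
+//	case err != nil:
+//		rawConn.Close()
+//	default:
+//		go st.HandleStreams(handleStream, traceCtx)
+//	}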
+func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + var authInfo credentials.AuthInfo + rawConn := conn + if config.Credentials != nil { + var err error + conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away + // from gRPC; those connections should be left open. io.EOF means + // the connection was closed before handshaking completed, which can + // happen naturally from probers. Return these errors directly. + if err == credentials.ErrConnDispatched || err == io.EOF { + return nil, err + } + return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + } + } writeBufSize := config.WriteBufferSize readBufSize := config.ReadBufferSize maxHeaderListSize := defaultServerMaxHeaderListSize @@ -139,15 +171,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err ID: http2.SettingMaxFrameSize, Val: http2MaxFrameLen, }} - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. - maxStreams := config.MaxStreams - if maxStreams == 0 { - maxStreams = math.MaxUint32 - } else { + if config.MaxStreams != math.MaxUint32 { isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, - Val: maxStreams, + Val: config.MaxStreams, }) } dynamicWindow := true @@ -205,27 +232,33 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime } + done := make(chan struct{}) t := &http2Server{ - ctx: context.Background(), + ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), - authInfo: config.AuthInfo, + authInfo: authInfo, framer: framer, readerDone: make(chan struct{}), writerDone: make(chan struct{}), - maxStreams: maxStreams, + maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*Stream), - stats: config.StatsHandler, + stats: config.StatsHandlers, kp: kp, idle: time.Now(), kep: kep, @@ -233,6 +266,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err czData: new(channelzData), bufferPool: newBufferPool(), } + t.logger = prefixLoggerForServerTransport(t) + // Add peer information to the http2server context. 
+ t.ctx = peer.NewContext(t.ctx, t.getPeer()) + t.controlBuf = newControlBuffer(t.done) if dynamicWindow { t.bdpEst = &bdpEstimator{ @@ -240,31 +277,39 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err updateFlowControl: t.updateFlowControl, } } - if t.stats != nil { - t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.stats { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{} - t.stats.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() defer func() { if err != nil { - t.Close() + t.Close(err) } }() // Check the validity of client preface. preface := make([]byte, len(clientPreface)) if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. + if err == io.EOF { + return nil, io.EOF + } return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) } if !bytes.Equal(preface, clientPreface) { @@ -286,100 +331,206 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } - } - t.conn.Close() + t.loopy.run() close(t.writerDone) }() go t.keepalive() return t, nil } -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() + streamID := frame.Header().StreamID - state := &decodeState{ - serverSide: true, - } - if err := state.decodeHeader(frame); err != nil { - if se, ok := status.FromError(err); ok { - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: statusCodeConvTab[se.Code()], - onWrite: func() {}, - }) - } - return false + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. 
+ if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) + return nil + } + + if streamID%2 != 1 || streamID <= t.maxStreamID { + // illegal gRPC stream id. + return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) } + t.maxStreamID = streamID buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - recvCompress: state.data.encoding, - method: state.data.method, - contentSubtype: state.data.contentSubtype, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + } + var ( + // if false, content-type was missing or invalid + isGRPC = false + contentType = "" + mdata = make(metadata.MD, len(frame.Fields)) + httpMethod string + // these are set if an error is encountered while parsing the headers + protocolError bool + headerError *status.Status + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + contentType = hf.Value + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + + case "grpc-accept-encoding": + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + if hf.Value == "" { + continue + } + compressors := hf.Value + if s.clientAdvertisedCompressors != "" { + compressors = s.clientAdvertisedCompressors + "," + compressors + } + s.clientAdvertisedCompressors = compressors + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) + } + // "Transports must consider requests containing the Connection header + // as malformed." - A41 + case "connection": + if t.logger.V(logLevel) { + t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") + } + protocolError = true + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) + t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + // "If multiple Host headers or multiple :authority headers are present, the + // request must be rejected with an HTTP status code 400 as required by Host + // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM + // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 + // error, this takes precedence over a client not speaking gRPC. 
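+	//
+	// Illustrative example (not upstream code) of the rule enforced below: a
+	// HEADERS block carrying
+	//
+	//	:authority: a.example.com
+	//	:authority: b.example.com
+	//
+	// is rejected with HTTP status 400 and gRPC code INTERNAL rather than
+	// silently picking one of the two values.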
+ if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { + errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + return nil + } + + if protocolError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return nil + } + if !isGRPC { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusUnsupportedMediaType, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), + rst: !frame.StreamEnded(), + }) + return nil + } + if headerError != nil { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: headerError, + rst: !frame.StreamEnded(), + }) + return nil + } + + // "If :authority is missing, Host must be renamed to :authority." - A41 + if len(mdata[":authority"]) == 0 { + // No-op if host isn't present, no eventual :authority header is a valid + // RPC. + if host, ok := mdata["host"]; ok { + mdata[":authority"] = host + delete(mdata, "host") + } + } else { + // "If :authority is present, Host must be discarded" - A41 + delete(mdata, "host") } + if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } - if state.data.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } - pr := &peer.Peer{ - Addr: t.remoteAddr, - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. 
-	if len(state.data.mdata) > 0 {
-		s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata)
-	}
-	if state.data.statsTags != nil {
-		s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags)
-	}
-	if state.data.statsTrace != nil {
-		s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace)
-	}
-	if t.inTapHandle != nil {
-		var err error
-		info := &tap.Info{
-			FullMethodName: state.data.method,
+	if len(mdata) > 0 {
+		s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
+		if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 {
+			s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1]))
 		}
-		s.ctx, err = t.inTapHandle(s.ctx, info)
-		if err != nil {
-			if logger.V(logLevel) {
-				logger.Warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
-			}
-			t.controlBuf.put(&cleanupStream{
-				streamID: s.id,
-				rst:      true,
-				rstCode:  http2.ErrCodeRefusedStream,
-				onWrite:  func() {},
-			})
-			s.cancel()
-			return false
+		if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 {
+			s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1]))
 		}
 	}
 	t.mu.Lock()
 	if t.state != reachable {
 		t.mu.Unlock()
 		s.cancel()
-		return false
+		return nil
 	}
 	if uint32(len(t.activeStreams)) >= t.maxStreams {
 		t.mu.Unlock()
@@ -390,18 +541,45 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 			onWrite:  func() {},
 		})
 		s.cancel()
-		return false
+		return nil
 	}
-	if streamID%2 != 1 || streamID <= t.maxStreamID {
+	if httpMethod != http.MethodPost {
 		t.mu.Unlock()
-		// illegal gRPC stream id.
-		if logger.V(logLevel) {
-			logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
+		errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod)
+		if t.logger.V(logLevel) {
+			t.logger.Infof("Aborting the stream early: %v", errMsg)
 		}
+		t.controlBuf.put(&earlyAbortStream{
+			httpStatus:     405,
+			streamID:       streamID,
+			contentSubtype: s.contentSubtype,
+			status:         status.New(codes.Internal, errMsg),
+			rst:            !frame.StreamEnded(),
+		})
 		s.cancel()
-		return true
+		return nil
+	}
+	if t.inTapHandle != nil {
+		var err error
+		if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
+			t.mu.Unlock()
+			if t.logger.V(logLevel) {
+				t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
+			}
+			stat, ok := status.FromError(err)
+			if !ok {
+				stat = status.New(codes.PermissionDenied, err.Error())
+			}
+			t.controlBuf.put(&earlyAbortStream{
+				httpStatus:     200,
+				streamID:       s.id,
+				contentSubtype: s.contentSubtype,
+				status:         stat,
+				rst:            !frame.StreamEnded(),
+			})
+			return nil
+		}
 	}
-	t.maxStreamID = streamID
 	t.activeStreams[streamID] = s
 	if len(t.activeStreams) == 1 {
 		t.idle = time.Time{}
 	}
@@ -415,17 +593,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 		t.adjustWindow(s, uint32(n))
 	}
 	s.ctx = traceCtx(s.ctx, s.method)
-	if t.stats != nil {
-		s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
+	for _, sh := range t.stats {
+		s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
 		inHeader := &stats.InHeader{
 			FullMethod:  s.method,
 			RemoteAddr:  t.remoteAddr,
 			LocalAddr:   t.localAddr,
 			Compression: s.recvCompress,
 			WireLength:  int(frame.Header().Length),
-			Header:      metadata.MD(state.data.mdata).Copy(),
+			Header:      mdata.Copy(),
 		}
-		t.stats.HandleRPC(s.ctx, inHeader)
+		sh.HandleRPC(s.ctx, inHeader)
 	}
 	s.ctxDone = s.ctx.Done()
 	s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
@@ -446,7 +624,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 		wq:       s.wq,
 	})
 	handle(s)
-	return false
+	return nil
 }
 
 // HandleStreams receives incoming streams using the given handler. This is
@@ -460,8 +638,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
 		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
 		if err != nil {
 			if se, ok := err.(http2.StreamError); ok {
-				if logger.V(logLevel) {
-					logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
+				if t.logger.V(logLevel) {
+					t.logger.Warningf("Encountered http2.StreamError: %v", se)
 				}
 				t.mu.Lock()
 				s := t.activeStreams[se.StreamID]
@@ -479,19 +657,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
 				continue
 			}
 			if err == io.EOF || err == io.ErrUnexpectedEOF {
-				t.Close()
+				t.Close(err)
 				return
 			}
-			if logger.V(logLevel) {
-				logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
-			}
-			t.Close()
+			t.Close(err)
 			return
 		}
 		switch frame := frame.(type) {
 		case *http2.MetaHeadersFrame:
-			if t.operateHeaders(frame, handle, traceCtx) {
-				t.Close()
+			if err := t.operateHeaders(frame, handle, traceCtx); err != nil {
+				t.Close(err)
 				break
 			}
 		case *http2.DataFrame:
@@ -507,8 +682,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
 		case *http2.GoAwayFrame:
 			// TODO: Handle GoAway from the client appropriately.
 		default:
-			if logger.V(logLevel) {
-				logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+			if t.logger.V(logLevel) {
+				t.logger.Infof("Received unsupported frame type %T", frame)
 			}
 		}
 	}
@@ -611,6 +786,10 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
 	if !ok {
 		return
 	}
+	if s.getState() == streamReadDone {
+		t.closeStream(s, true, http2.ErrCodeStreamClosed, false)
+		return
+	}
 	if size > 0 {
 		if err := s.fc.onData(size); err != nil {
 			t.closeStream(s, true, http2.ErrCodeFlowControl, false)
@@ -631,7 +810,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
 			s.write(recvMsg{buffer: buffer})
 		}
 	}
-	if f.Header().Flags.Has(http2.FlagDataEndStream) {
+	if f.StreamEnded() {
 		// Received the end of stream from the client.
 		s.compareAndSwapState(streamActive, streamReadDone)
 		s.write(recvMsg{err: io.EOF})
@@ -688,8 +867,8 @@ const (
 func (t *http2Server) handlePing(f *http2.PingFrame) {
 	if f.IsAck() {
-		if f.Data == goAwayPing.data && t.drainChan != nil {
-			close(t.drainChan)
+		if f.Data == goAwayPing.data && t.drainEvent != nil {
+			t.drainEvent.Fire()
 			return
 		}
 		// Maybe it's a BDP ping.
@@ -731,10 +910,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
 	if t.pingStrikes > maxPingStrikes {
 		// Send goaway and close the connection.
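+		// Illustrative recap (not upstream code; the condition shown is the
+		// editor's paraphrase of the checks made earlier in handlePing): a
+		// "strike" is counted for each ping that arrives with no outstanding
+		// data and sooner than the enforcement policy allows, roughly:
+		//
+		//	if noPendingStreams && sinceLastPing < kep.MinTime {
+		//		t.pingStrikes++
+		//	}
+		//
+		// Past maxPingStrikes the server answers with GOAWAY(ENHANCE_YOUR_CALM,
+		// "too_many_pings"), which the client reacts to by doubling its keepalive
+		// interval (see http2_client.go earlier in this patch).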
- if logger.V(logLevel) { - logger.Errorf("transport: Got too many pings from the client, closing the connection.") - } - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) } } @@ -766,8 +942,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - if logger.V(logLevel) { - logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + if t.logger.V(logLevel) { + t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) } return false } @@ -775,12 +951,27 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true } +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + // WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { + return t.streamContextErr(s) + } + + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } - s.hdrMu.Lock() + if md.Len() > 0 { if s.header.Len() > 0 { s.header = metadata.Join(s.header, md) @@ -789,10 +980,8 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() - return err + return status.Convert(err).Err() } - s.hdrMu.Unlock() return nil } @@ -823,14 +1012,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - if t.stats != nil { + for _, sh := range t.stats { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. outHeader := &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, } - t.stats.HandleRPC(s.Context(), outHeader) + sh.HandleRPC(s.Context(), outHeader) } return nil } @@ -840,17 +1029,19 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { return nil } - s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. if !s.updateHeaderSent() { // No headers have been sent. if len(s.header) > 0 { // Send a separate header frame. if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() return err } } else { // Send a trailer only response. @@ -865,7 +1056,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. 
- logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } @@ -879,7 +1070,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { endStream: true, onWrite: t.setResetPingStrikes, } - s.hdrMu.Unlock() + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) if !success { if err != nil { @@ -891,10 +1082,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - if t.stats != nil { + for _, sh := range t.stats { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -906,23 +1097,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? - s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } } df := &dataFrame{ @@ -932,12 +1112,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } return t.controlBuf.put(df) } @@ -986,20 +1161,20 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain("max_idle") return } idleTimer.Reset(val) case <-ageTimer.C: - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain("max_age") ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: // Close the connection after grace period. 
- if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to maximum connection age.") + if t.logger.V(logLevel) { + t.logger.Infof("Closing server transport due to maximum connection age") } - t.Close() + t.controlBuf.put(closeConnection{}) case <-t.done: } return @@ -1015,10 +1190,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to idleness.") - } - t.Close() + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) return } if !outstandingPing { @@ -1045,11 +1217,14 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() error { +func (t *http2Server) Close(err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return errors.New("transport: Close() was already called") + return + } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) } t.state = closing streams := t.activeStreams @@ -1057,27 +1232,22 @@ func (t *http2Server) Close() error { t.mu.Unlock() t.controlBuf.finish() close(t.done) - err := t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) + if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { + t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) } + channelz.RemoveEntry(t.channelzID) // Cancel all active streams. for _, s := range streams { s.cancel() } - if t.stats != nil { + for _, sh := range t.stats { connEnd := &stats.ConnEnd{} - t.stats.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } - return err } // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1099,6 +1269,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. @@ -1118,6 +1293,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. 
+ s.cancel() + s.swapState(streamDone) t.deleteStream(s, eosReceived) @@ -1133,18 +1313,14 @@ func (t *http2Server) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Server) Drain() { - t.drain(http2.ErrCodeNo, []byte{}) -} - -func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { +func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() - if t.drainChan != nil { + if t.drainEvent != nil { return } - t.drainChan = make(chan struct{}) - t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) + t.drainEvent = grpcsync.NewEvent() + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1152,39 +1328,41 @@ var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} // Handles outgoing GoAway and returns true if loopy needs to put itself // in draining mode. func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.maxStreamMu.Lock() t.mu.Lock() if t.state == closing { // TODO(mmukhi): This seems unnecessary. t.mu.Unlock() + t.maxStreamMu.Unlock() // The transport is closing. return false, ErrConnClosing } - sid := t.maxStreamID if !g.headsUp { // Stop accepting more streams now. t.state = draining + sid := t.maxStreamID + retErr := g.closeConn if len(t.activeStreams) == 0 { - g.closeConn = true + retErr = errors.New("second GOAWAY written and no active streams left to process") } t.mu.Unlock() + t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if g.closeConn { - // Abruptly close the connection following the GoAway (via - // loopywriter). But flush out what's inside the buffer first. - t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") + if retErr != nil { + return false, retErr } return true, nil } t.mu.Unlock() + t.maxStreamMu.Unlock() // For a graceful close, send out a GoAway with stream ID of MaxUInt32, // Follow that with a ping and wait for the ack to come back or a timer // to expire. During this time accept new streams since they might have // originated before the GoAway reaches the client. // After getting the ack or timer expiration send out another GoAway this // time with an ID of the max stream server intends to process. - if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { return false, err } if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { @@ -1194,7 +1372,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { timer := time.NewTimer(time.Minute) defer timer.Stop() select { - case <-t.drainChan: + case <-t.drainEvent.Done(): case <-timer.C: case <-t.done: return @@ -1253,6 +1431,13 @@ func (t *http2Server) getOutFlowWindow() int64 { } } +func (t *http2Server) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + } +} + func getJitter(v time.Duration) time.Duration { if v == infinity { return 0 @@ -1262,3 +1447,18 @@ func getJitter(v time.Duration) time.Duration { j := grpcrand.Int63n(2*r) - r return time.Duration(j) } + +type connectionKey struct{} + +// GetConnection gets the connection from the context. 
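+//
+// Illustrative use (not upstream code; this is an internal package, so the
+// handler shape and names here are assumptions):
+//
+//	func unaryHandler(ctx context.Context, req interface{}) (interface{}, error) {
+//		if c := GetConnection(ctx); c != nil {
+//			// Inspect c.LocalAddr() / c.RemoteAddr() for the incoming RPC.
+//		}
+//		return process(ctx, req)
+//	}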
+func GetConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. This also +// allows any unary or streaming interceptors to see the connection. +func setConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/http_util.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/http_util.go index 5e1e7a65d..19cbb18f5 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -20,13 +20,14 @@ package transport import ( "bufio" - "bytes" "encoding/base64" + "errors" "fmt" "io" "math" "net" "net/http" + "net/url" "strconv" "strings" "time" @@ -37,22 +38,14 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) const ( // http2MaxFrameLen specifies the max length of a HTTP2 frame. http2MaxFrameLen = 16384 // 16KB frame - // http://http2.github.io/http2-spec/#SettingValues + // https://httpwg.org/specs/rfc7540.html#SettingValues http2InitHeaderTableSize = 4096 - // baseContentType is the base content-type for gRPC. This is a valid - // content-type on it's own, but can also include a content-subtype such as - // "proto" as a suffix after "+" or ";". See - // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests - // for more details. - ) var ( @@ -73,13 +66,6 @@ var ( http2.ErrCodeInadequateSecurity: codes.PermissionDenied, http2.ErrCodeHTTP11Required: codes.Internal, } - statusCodeConvTab = map[codes.Code]http2.ErrCode{ - codes.Internal: http2.ErrCodeInternal, - codes.Canceled: http2.ErrCodeCancel, - codes.Unavailable: http2.ErrCodeRefusedStream, - codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, - codes.PermissionDenied: http2.ErrCodeInadequateSecurity, - } // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. HTTPStatusConvTab = map[int]codes.Code{ // 400 Bad Request - INTERNAL. @@ -99,55 +85,8 @@ var ( // 504 Gateway timeout - UNAVAILABLE. http.StatusGatewayTimeout: codes.Unavailable, } - logger = grpclog.Component("transport") ) -type parsedHeaderData struct { - encoding string - // statusGen caches the stream status received from the trailer the server - // sent. Client side only. Do not access directly. After all trailers are - // parsed, use the status method to retrieve the status. - statusGen *status.Status - // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not - // intended for direct access outside of parsing. - rawStatusCode *int - rawStatusMsg string - httpStatus *int - // Server side only fields. - timeoutSet bool - timeout time.Duration - method string - // key-value metadata map from the peer. - mdata map[string][]string - statsTags []byte - statsTrace []byte - contentSubtype string - - // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). - // - // We are in gRPC mode (peer speaking gRPC) if: - // * We are client side and have already received a HEADER frame that indicates gRPC peer. 
- // * The header contains valid a content-type, i.e. a string starts with "application/grpc" - // And we should handle error specific to gRPC. - // - // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we - // are in HTTP fallback mode, and should handle error specific to HTTP. - isGRPC bool - grpcErr error - httpErr error - contentTypeErr string -} - -// decodeState configures decoding criteria and records the decoded data. -type decodeState struct { - // whether decoding on server side or not - serverSide bool - - // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS - // frame once decodeHeader function has been invoked and returned. - data parsedHeaderData -} - // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -185,14 +124,6 @@ func isWhitelistedHeader(hdr string) bool { } } -func (d *decodeState) status() *status.Status { - if d.data.statusGen == nil { - // No status-details were provided; generate status using code/msg. - d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) - } - return d.data.statusGen -} - const binHdrSuffix = "-bin" func encodeBinHeader(v []byte) string { @@ -222,166 +153,16 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { - // frame.Truncated is set to true when framer detects that the current header - // list size hits MaxHeaderListSize limit. - if frame.Truncated { - return status.Error(codes.Internal, "peer header list size exceeded limit") - } - - for _, hf := range frame.Fields { - d.processHeaderField(hf) - } - - if d.data.isGRPC { - if d.data.grpcErr != nil { - return d.data.grpcErr - } - if d.serverSide { - return nil - } - if d.data.rawStatusCode == nil && d.data.statusGen == nil { - // gRPC status doesn't exist. - // Set rawStatusCode to be unknown and return nil error. - // So that, if the stream has ended this Unknown status - // will be propagated to the user. - // Otherwise, it will be ignored. In which case, status from - // a later trailer, that has StreamEnded flag set, is propagated. - code := int(codes.Unknown) - d.data.rawStatusCode = &code - } - return nil - } - - // HTTP fallback mode - if d.data.httpErr != nil { - return d.data.httpErr - } - - var ( - code = codes.Internal // when header does not include HTTP status, return INTERNAL - ok bool - ) - - if d.data.httpStatus != nil { - code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] - if !ok { - code = codes.Unknown - } - } - - return status.Error(code, d.constructHTTPErrMsg()) -} - -// constructErrMsg constructs error message to be returned in HTTP fallback mode. -// Format: HTTP status code and its corresponding message + content-type error message. 
-func (d *decodeState) constructHTTPErrMsg() string { - var errMsgs []string - - if d.data.httpStatus == nil { - errMsgs = append(errMsgs, "malformed header: missing HTTP status") - } else { - errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) - } - - if d.data.contentTypeErr == "" { - errMsgs = append(errMsgs, "transport: missing content-type field") - } else { - errMsgs = append(errMsgs, d.data.contentTypeErr) +func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { + v, err := decodeBinHeader(rawDetails) + if err != nil { + return nil, err } - - return strings.Join(errMsgs, "; ") -} - -func (d *decodeState) addMetadata(k, v string) { - if d.data.mdata == nil { - d.data.mdata = make(map[string][]string) - } - d.data.mdata[k] = append(d.data.mdata[k], v) -} - -func (d *decodeState) processHeaderField(f hpack.HeaderField) { - switch f.Name { - case "content-type": - contentSubtype, validContentType := grpcutil.ContentSubtype(f.Value) - if !validContentType { - d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) - return - } - d.data.contentSubtype = contentSubtype - // TODO: do we want to propagate the whole content-type in the metadata, - // or come up with a way to just propagate the content-subtype if it was set? - // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} - // in the metadata? - d.addMetadata(f.Name, f.Value) - d.data.isGRPC = true - case "grpc-encoding": - d.data.encoding = f.Value - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return - } - d.data.rawStatusCode = &code - case "grpc-message": - d.data.rawStatusMsg = decodeGrpcMessage(f.Value) - case "grpc-status-details-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - s := &spb.Status{} - if err := proto.Unmarshal(v, s); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - d.data.statusGen = status.FromProto(s) - case "grpc-timeout": - d.data.timeoutSet = true - var err error - if d.data.timeout, err = decodeTimeout(f.Value); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) - } - case ":path": - d.data.method = f.Value - case ":status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) - return - } - d.data.httpStatus = &code - case "grpc-tags-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) - return - } - d.data.statsTags = v - d.addMetadata(f.Name, string(v)) - case "grpc-trace-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) - return - } - d.data.statsTrace = v - d.addMetadata(f.Name, string(v)) - default: - if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { - break - } - v, err := decodeMetadataHeader(f.Name, f.Value) - if err != nil { - if logger.V(logLevel) { - logger.Errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) - 
} - return - } - d.addMetadata(f.Name, v) + st := &spb.Status{} + if err = proto.Unmarshal(v, st); err != nil { + return nil, err } + return status.FromProto(st), nil } type timeoutUnit uint8 @@ -468,13 +249,13 @@ func encodeGrpcMessage(msg string) string { } func encodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder for len(msg) > 0 { r, size := utf8.DecodeRuneInString(msg) for _, b := range []byte(string(r)) { if size > 1 { // If size > 1, r is not ascii. Always do percent encoding. - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) continue } @@ -483,14 +264,14 @@ func encodeGrpcMessageUnchecked(msg string) string { // // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". if b >= spaceByte && b <= tildeByte && b != percentByte { - buf.WriteByte(b) + sb.WriteByte(b) } else { - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) } } msg = msg[size:] } - return buf.String() + return sb.String() } // decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. @@ -508,23 +289,23 @@ func decodeGrpcMessage(msg string) string { } func decodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder lenMsg := len(msg) for i := 0; i < lenMsg; i++ { c := msg[i] if c == percentByte && i+2 < lenMsg { parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) if err != nil { - buf.WriteByte(c) + sb.WriteByte(c) } else { - buf.WriteByte(byte(parsed)) + sb.WriteByte(byte(parsed)) i += 2 } } else { - buf.WriteByte(c) + sb.WriteByte(c) } } - return buf.String() + return sb.String() } type bufWriter struct { @@ -533,8 +314,6 @@ type bufWriter struct { batchSize int conn net.Conn err error - - onFlush func() } func newBufWriter(conn net.Conn, batchSize int) *bufWriter { @@ -550,7 +329,8 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. - return w.conn.Write(b) + n, err = w.conn.Write(b) + return n, toIOError(err) } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) @@ -571,14 +351,31 @@ func (w *bufWriter) Flush() error { if w.offset == 0 { return nil } - if w.onFlush != nil { - w.onFlush() - } _, w.err = w.conn.Write(w.buf[:w.offset]) + w.err = toIOError(w.err) w.offset = 0 return w.err } +type ioError struct { + error +} + +func (i ioError) Unwrap() error { + return i.error +} + +func isIOError(err error) bool { + return errors.As(err, &ioError{}) +} + +func toIOError(err error) error { + if err == nil { + return nil + } + return ioError{error: err} +} + type framer struct { writer *bufWriter fr *http2.Framer @@ -605,3 +402,31 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } + +// parseDialTarget returns the network and address to pass to dialer. 
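+//
+// Illustrative worked examples (derived by the editor from the code below):
+//
+//	parseDialTarget("unix:sock")            // -> ("unix", "sock")
+//	parseDialTarget("unix:/tmp/app.sock")   // -> ("unix", "/tmp/app.sock")
+//	parseDialTarget("unix:///tmp/app.sock") // -> ("unix", "/tmp/app.sock")
+//	parseDialTarget("10.0.0.1:443")         // -> ("tcp", "10.0.0.1:443")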
+func parseDialTarget(target string) (string, string) { + net := "tcp" + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + return n, target[m1+1:] + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr := t.Path + if scheme == "unix" { + if addr == "" { + addr = t.Host + } + return scheme, addr + } + } + return net, target +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/logging.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/logging.go new file mode 100644 index 000000000..42ed2b07a --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +var logger = grpclog.Component("transport") + +func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) +} + +func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) +} + +func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go new file mode 100644 index 000000000..c11b52782 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package networktype declares the network type to be used in the default +// dialer. Attribute of a resolver.Address. +package networktype + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. 
+type keyType string + +const key = keyType("grpc.internal.transport.networktype") + +// Set returns a copy of the provided address with attributes containing networkType. +func Set(address resolver.Address, networkType string) resolver.Address { + address.Attributes = address.Attributes.WithValue(key, networkType) + return address +} + +// Get returns the network type in the resolver.Address and true, or "", false +// if not present. +func Get(address resolver.Address) (string, bool) { + v := address.Attributes.Value(key) + if v == nil { + return "", false + } + return v.(string), true +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/proxy.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/proxy.go similarity index 71% rename from prometheus-to-sd/vendor/google.golang.org/grpc/proxy.go rename to prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/proxy.go index f8f69bfb7..415961987 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/proxy.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -16,13 +16,12 @@ * */ -package grpc +package transport import ( "bufio" "context" "encoding/base64" - "errors" "fmt" "io" "net" @@ -34,13 +33,11 @@ import ( const proxyAuthHeaderKey = "Proxy-Authorization" var ( - // errDisabled indicates that proxy is disabled for the address. - errDisabled = errors.New("proxy is disabled for the address") // The following variable will be overwritten in the tests. httpProxyFromEnvironment = http.ProxyFromEnvironment ) -func mapAddress(ctx context.Context, address string) (*url.URL, error) { +func mapAddress(address string) (*url.URL, error) { req := &http.Request{ URL: &url.URL{ Scheme: "https", @@ -51,9 +48,6 @@ func mapAddress(ctx context.Context, address string) (*url.URL, error) { if err != nil { return nil, err } - if url == nil { - return nil, errDisabled - } return url, nil } @@ -76,7 +70,7 @@ func basicAuth(username, password string) string { return base64.StdEncoding.EncodeToString([]byte(auth)) } -func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) { +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { defer func() { if err != nil { conn.Close() @@ -115,32 +109,28 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri return &bufConn{Conn: conn, r: r}, nil } -// newProxyDialer returns a dialer that connects to proxy first if necessary. -// The returned dialer checks if a proxy is necessary, dial to the proxy with the -// provided dialer, does HTTP CONNECT handshake and returns the connection. -func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { - return func(ctx context.Context, addr string) (conn net.Conn, err error) { - var newAddr string - proxyURL, err := mapAddress(ctx, addr) - if err != nil { - if err != errDisabled { - return nil, err - } - newAddr = addr - } else { - newAddr = proxyURL.Host - } +// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy +// is necessary, dials, does the HTTP CONNECT handshake, and returns the +// connection. 
+func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { + newAddr := addr + proxyURL, err := mapAddress(addr) + if err != nil { + return nil, err + } + if proxyURL != nil { + newAddr = proxyURL.Host + } - conn, err = dialer(ctx, newAddr) - if err != nil { - return - } - if proxyURL != nil { - // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL) - } + conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + if err != nil { return } + if proxyURL != nil { + // proxy is disabled if proxyURL is nil. + conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + } + return } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/transport.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/transport.go index b74030a96..aa1c89659 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -30,9 +30,11 @@ import ( "net" "sync" "sync/atomic" + "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -41,6 +43,10 @@ import ( "google.golang.org/grpc/tap" ) +// ErrNoHeaders is used as a signal that a trailers only response was received, +// and is not a real error. +var ErrNoHeaders = errors.New("stream has no headers") + const logLevel = 2 type bufferPool struct { @@ -241,6 +247,7 @@ type Stream struct { ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. + doneFunc func() // invoked at the end of stream on client side. ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) method string // the associated RPC method of the stream recvCompress string @@ -250,6 +257,9 @@ type Stream struct { fc *inFlow wq *writeQuota + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. This is empty for the client side stream. + clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) @@ -338,8 +348,24 @@ func (s *Stream) RecvCompress() string { } // SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(str string) { - s.sendCompress = str +func (s *Stream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SendCompress returns the send compressor name. +func (s *Stream) SendCompress() string { + return s.sendCompress +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. 
+func (s *Stream) ClientAdvertisedCompressors() string { + return s.clientAdvertisedCompressors } // Done returns a channel which is closed when it receives the final status @@ -363,9 +389,15 @@ func (s *Stream) Header() (metadata.MD, error) { return s.header.Copy(), nil } s.waitOnHeader() + if !s.headerValid { return nil, s.status.Err() } + + if s.noHeaders { + return nil, ErrNoHeaders + } + return s.header.Copy(), nil } @@ -517,26 +549,21 @@ const ( // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { MaxStreams uint32 - AuthInfo credentials.AuthInfo + ConnectionTimeout time.Duration + Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandler stats.Handler + StatsHandlers []stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 } -// NewServerTransport creates a ServerTransport with conn or non-nil error -// if it fails. -func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { - return newHTTP2Server(conn, config) -} - // ConnectOptions covers all relevant options for communicating with the server. type ConnectOptions struct { // UserAgent is the application user agent. @@ -555,8 +582,8 @@ type ConnectOptions struct { CredsBundle credentials.Bundle // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters - // StatsHandler stores the handler for stats. - StatsHandler stats.Handler + // StatsHandlers stores the handler for stats. + StatsHandlers []stats.Handler // InitialWindowSize sets the initial window size for a stream. InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. @@ -566,15 +593,17 @@ type ConnectOptions struct { // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 + // UseProxy specifies if a proxy should be used. + UseProxy bool } // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. 
-func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) } // Options provides additional hints and information for message @@ -609,6 +638,8 @@ type CallHdr struct { ContentSubtype string PreviousAttempts int // value of grpc-previous-rpc-attempts header to set + + DoneFunc func() // called when the stream is finished } // ClientTransport is the common interface for all gRPC client-side transport @@ -617,7 +648,7 @@ type ClientTransport interface { // Close tears down this transport. Once it returns, the transport // should not be accessed any more. The caller must make sure this // is called only once. - Close() error + Close(err error) // GracefulClose starts to tear down the transport: the transport will stop // accepting new RPCs and NewStream will return error. Once all streams are @@ -651,8 +682,9 @@ type ClientTransport interface { // HTTP/2). GoAway() <-chan struct{} - // GetGoAwayReason returns the reason why GoAway frame was received. - GetGoAwayReason() GoAwayReason + // GetGoAwayReason returns the reason why GoAway frame was received, along + // with a human readable string with debug info. + GetGoAwayReason() (GoAwayReason, string) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr @@ -688,13 +720,13 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() error + Close(err error) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain() + Drain(debugData string) // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() @@ -739,6 +771,12 @@ func (e ConnectionError) Origin() error { return e.err } +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. +func (e ConnectionError) Unwrap() error { + return e.err +} + var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go new file mode 100644 index 000000000..e8b492774 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go @@ -0,0 +1,40 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package internal + +import ( + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/resolver" +) + +// handshakeClusterNameKey is the type used as the key to store cluster name in +// the Attributes field of resolver.Address. +type handshakeClusterNameKey struct{} + +// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field +// is updated with the cluster name. +func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) + return addr +} + +// GetXDSHandshakeClusterName returns cluster name stored in attr. +func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { + v := attr.Value(handshakeClusterNameKey{}) + name, ok := v.(string) + return name, ok +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/metadata/metadata.go b/prometheus-to-sd/vendor/google.golang.org/grpc/metadata/metadata.go index cf6d1b947..a2cdcaf12 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/metadata/metadata.go @@ -41,16 +41,17 @@ type MD map[string][]string // New creates an MD from a given key-value map. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may // result in errors if set in metadata. func New(m map[string]string) MD { - md := MD{} + md := make(MD, len(m)) for k, val := range m { key := strings.ToLower(k) md[key] = append(md[key], val) @@ -62,10 +63,11 @@ func New(m map[string]string) MD { // Pairs panics if len(kv) is odd. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -74,14 +76,10 @@ func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } - md := MD{} - var key string - for i, s := range kv { - if i%2 == 0 { - key = strings.ToLower(s) - continue - } - md[key] = append(md[key], s) + md := make(MD, len(kv)/2) + for i := 0; i < len(kv); i += 2 { + key := strings.ToLower(kv[i]) + md[key] = append(md[key], kv[i+1]) } return md } @@ -93,16 +91,24 @@ func (md MD) Len() int { // Copy returns a copy of md. func (md MD) Copy() MD { - return Join(md) + out := make(MD, len(md)) + for k, v := range md { + out[k] = copyOf(v) + } + return out } // Get obtains the values for a given key. +// +// k is converted to lowercase before searching in md. func (md MD) Get(k string) []string { k = strings.ToLower(k) return md[k] } // Set sets the value of a given key with a slice of values. +// +// k is converted to lowercase before storing in md. 
func (md MD) Set(k string, vals ...string) { if len(vals) == 0 { return @@ -111,7 +117,10 @@ func (md MD) Set(k string, vals ...string) { md[k] = vals } -// Append adds the values to key k, not overwriting what was already stored at that key. +// Append adds the values to key k, not overwriting what was already stored at +// that key. +// +// k is converted to lowercase before storing in md. func (md MD) Append(k string, vals ...string) { if len(vals) == 0 { return @@ -120,9 +129,17 @@ func (md MD) Append(k string, vals ...string) { md[k] = append(md[k], vals...) } +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { + k = strings.ToLower(k) + delete(md, k) +} + // Join joins any number of mds into a single MD. -// The order of values for each key is determined by the order in which -// the mds containing those values are presented to Join. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. func Join(mds ...MD) MD { out := MD{} for _, md := range mds { @@ -149,8 +166,8 @@ func NewOutgoingContext(ctx context.Context, md MD) context.Context { } // AppendToOutgoingContext returns a new context with the provided kv merged -// with any existing metadata in the context. Please refer to the -// documentation of Pairs for a description of kv. +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) @@ -158,25 +175,76 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) added := make([][]string, len(md.added)+1) copy(added, md.added) - added[len(added)-1] = make([]string, len(kv)) - copy(added[len(added)-1], kv) + kvCopy := make([]string, 0, len(kv)) + for i := 0; i < len(kv); i += 2 { + kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) + } + added[len(added)-1] = kvCopy return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } -// FromIncomingContext returns the incoming metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. -func FromIncomingContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdIncomingKey{}).(MD) - return +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := make(MD, len(md)) + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = copyOf(v) + } + return out, true +} + +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Key must be lower-case. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func ValueFromIncomingContext(ctx context.Context, key string) []string { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil + } + + if v, ok := md[key]; ok { + return copyOf(v) + } + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + if strings.ToLower(k) == key { + return copyOf(v) + } + } + return nil +} + +// the returned slice must not be modified in place +func copyOf(v []string) []string { + vals := make([]string, len(v)) + copy(vals, v) + return vals } -// FromOutgoingContextRaw returns the un-merged, intermediary contents -// of rawMD. Remember to perform strings.ToLower on the keys. The returned -// MD should not be modified. Writing to it may cause races. Modification -// should be made to copies of the returned MD. +// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). // -// This is intended for gRPC-internal use ONLY. +// This is intended for gRPC-internal use ONLY. Users should use +// FromOutgoingContext instead. func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { @@ -186,21 +254,39 @@ func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { return raw.md, raw.added, true } -// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. func FromOutgoingContext(ctx context.Context) (MD, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, false } - mds := make([]MD, 0, len(raw.added)+1) - mds = append(mds, raw.md) - for _, vv := range raw.added { - mds = append(mds, Pairs(vv...)) + mdSize := len(raw.md) + for i := range raw.added { + mdSize += len(raw.added[i]) / 2 + } + + out := make(MD, mdSize) + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. 
+		key := strings.ToLower(k)
+		out[key] = copyOf(v)
+	}
+	for _, added := range raw.added {
+		if len(added)%2 == 1 {
+			panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
+		}
+
+		for i := 0; i < len(added); i += 2 {
+			key := strings.ToLower(added[i])
+			out[key] = append(out[key], added[i+1])
+		}
 	}
-	return Join(mds...), ok
+	return out, ok
 }
 
 type rawMD struct {
diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/picker_wrapper.go b/prometheus-to-sd/vendor/google.golang.org/grpc/picker_wrapper.go
index a58174b6f..02f975951 100644
--- a/prometheus-to-sd/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/prometheus-to-sd/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -26,6 +26,7 @@ import (
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/internal/channelz"
+	istatus "google.golang.org/grpc/internal/status"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/status"
 )
@@ -35,6 +36,7 @@ import (
 type pickerWrapper struct {
 	mu         sync.Mutex
 	done       bool
+	idle       bool
 	blockingCh chan struct{}
 	picker     balancer.Picker
 }
@@ -46,7 +48,11 @@ func newPickerWrapper() *pickerWrapper {
 // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
 func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
 	pw.mu.Lock()
-	if pw.done {
+	if pw.done || pw.idle {
+		// There is a small window where a picker update from the LB policy can
+		// race with the channel going to idle mode. If the picker is idle here,
+		// it is because the channel asked it to do so, and therefore it is safe
+		// to ignore the update from the LB policy.
 		pw.mu.Unlock()
 		return
 	}
@@ -57,12 +63,16 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
 	pw.mu.Unlock()
 }
 
-func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
-	acw.mu.Lock()
-	ac := acw.ac
-	acw.mu.Unlock()
+// doneChannelzWrapper performs the following:
+//   - increments the calls started channelz counter
+//   - wraps the done function in the passed in result to increment the calls
+//     failed or calls succeeded channelz counter before invoking the actual
+//     done function.
+func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
+	ac := acbw.ac
 	ac.incrCallsStarted()
-	return func(b balancer.DoneInfo) {
+	done := result.Done
+	result.Done = func(b balancer.DoneInfo) {
 		if b.Err != nil && b.Err != io.EOF {
 			ac.incrCallsFailed()
 		} else {
@@ -81,7 +91,7 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f
 //   - the current picker returns other errors and failfast is false.
 //   - the subConn returned by the current picker is not READY
 // When one of these situations happens, pick blocks until the picker gets updated.
-func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) {
 	var ch chan struct{}
 
 	var lastPickErr error
@@ -89,7 +99,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
 		pw.mu.Lock()
 		if pw.done {
 			pw.mu.Unlock()
-			return nil, nil, ErrClientConnClosing
+			return nil, balancer.PickResult{}, ErrClientConnClosing
 		}
 
 		if pw.picker == nil {
} switch ctx.Err() { case context.DeadlineExceeded: - return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, nil, status.Error(codes.Canceled, errStr) + return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) } case <-ch: } @@ -124,14 +134,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. pw.mu.Unlock() pickResult, err := p.Pick(info) - if err != nil { if err == balancer.ErrNoSubConnAvailable { continue } - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, err + // First restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) + } + return nil, balancer.PickResult{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -139,19 +152,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, nil, status.Error(codes.Unavailable, err.Error()) + return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } - acw, ok := pickResult.SubConn.(*acBalancerWrapper) + acbw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { - logger.Error("subconn returned from pick is not *acBalancerWrapper") + logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } - if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { - return t, doneChannelzWrapper(acw, pickResult.Done), nil + doneChannelzWrapper(acbw, &pickResult) + return t, pickResult, nil } - return t, pickResult.Done, nil + return t, pickResult, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. @@ -175,3 +189,28 @@ func (pw *pickerWrapper) close() { pw.done = true close(pw.blockingCh) } + +func (pw *pickerWrapper) enterIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.idle = true +} + +func (pw *pickerWrapper) exitIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.blockingCh = make(chan struct{}) + pw.idle = false +} + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/pickfirst.go b/prometheus-to-sd/vendor/google.golang.org/grpc/pickfirst.go index 56e33f6c7..abe266b02 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/pickfirst.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/pickfirst.go @@ -19,11 +19,15 @@ package grpc import ( + "encoding/json" "errors" "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/serviceconfig" ) // PickFirstBalancerName is the name of the pick_first balancer. 
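// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of the vendored patch.
// The pick() rewrite above relies on a three-way error contract for
// balancer.Picker implementations: balancer.ErrNoSubConnAvailable blocks the
// RPC until the next picker update, a status error ends the RPC with that
// status (now sanitized per gRFC A54), and any other error maps to
// codes.Unavailable for failfast RPCs. A minimal picker honoring that
// contract might look like the following; the fixedPicker name and its
// readySC field are hypothetical.
// ---------------------------------------------------------------------------
package example

import "google.golang.org/grpc/balancer"

type fixedPicker struct {
	readySC balancer.SubConn // a SubConn believed to be READY; may be nil
}

func (p *fixedPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	if p.readySC == nil {
		// No usable connection yet: this makes pickerWrapper.pick block
		// until updatePicker installs a new picker.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	return balancer.PickResult{SubConn: p.readySC}, nil
}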
@@ -43,94 +47,181 @@ func (*pickfirstBuilder) Name() string { return PickFirstBalancerName } +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of addresses received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &pfConfig{} + if err := json.Unmarshal(js, cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + type pickfirstBalancer struct { - state connectivity.State - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn + cfg *pfConfig } func (b *pickfirstBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - // Set a failing picker if we don't have a good picker. - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) - } if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) + logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) + } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } -func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { - if len(cs.ResolverState.Addresses) == 0 { +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + addrs := state.ResolverState.Addresses + if len(addrs) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. 
+ b.cc.RemoveSubConn(b.subConn) + b.subConn = nil + } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if b.sc == nil { - var err error - b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) - if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState + + if state.BalancerConfig != nil { + cfg, ok := state.BalancerConfig.(*pfConfig) + if !ok { + return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) } - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) - b.sc.Connect() - } else { - b.sc.UpdateAddresses(cs.ResolverState.Addresses) - b.sc.Connect() + b.cfg = cfg + } + + if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { + grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, addrs) + return nil } + + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState + } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.subConn.Connect() return nil } -func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) } - if b.sc != sc { + if b.subConn != subConn { if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") } return } - b.state = s.ConnectivityState - if s.ConnectivityState == connectivity.Shutdown { - b.sc = nil + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil return } - switch s.ConnectivityState { - case connectivity.Ready, connectivity.Idle: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + switch state.ConnectivityState { + case connectivity.Ready: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) case connectivity.Connecting: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. See A62. 
+ return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + case connectivity.Idle: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. + b.subConn.Connect() + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ - ConnectivityState: s.ConnectivityState, - Picker: &picker{err: s.ConnectionError}, + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, }) } + b.state = state.ConnectivityState } func (b *pickfirstBalancer) Close() { } +func (b *pickfirstBalancer) ExitIdle() { + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() + } +} + type picker struct { result balancer.PickResult err error } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + subConn balancer.SubConn +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + func init() { balancer.Register(newPickfirstBuilder()) } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/preloader.go b/prometheus-to-sd/vendor/google.golang.org/grpc/preloader.go index 76acbbcc9..cd4554785 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/preloader.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/preloader.go @@ -25,7 +25,10 @@ import ( // PreparedMsg is responsible for creating a Marshalled and Compressed object. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type PreparedMsg struct { // Struct for preparing msg before sending them encodedData []byte diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/regenerate.sh b/prometheus-to-sd/vendor/google.golang.org/grpc/regenerate.sh index 987bc2025..a6f26c8ab 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/regenerate.sh +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/regenerate.sh @@ -26,8 +26,13 @@ export GOBIN=${WORKDIR}/bin export PATH=${GOBIN}:${PATH} mkdir -p ${GOBIN} -echo "go install github.com/golang/protobuf/protoc-gen-go" -(cd test/tools && go install github.com/golang/protobuf/protoc-gen-go) +echo "remove existing generated files" +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') + +echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" +(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) echo "go install cmd/protoc-gen-go-grpc" (cd cmd/protoc-gen-go-grpc && go install .) 
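// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch only, not part of the vendored patch.
// The pfConfig added to pickfirst.go above accepts a shuffleAddressList knob
// via service config. One hedged way to exercise it from client code is a
// default service config; note the diff gates the behavior on
// envconfig.PickFirstLBConfig, so without that experimental flag enabled the
// option is parsed but ignored. The target string is a placeholder.
// ---------------------------------------------------------------------------
package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialWithShuffledPickFirst() (*grpc.ClientConn, error) {
	return grpc.Dial("dns:///backend.example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// pick_first is the default LB policy; the config only adds shuffling.
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"pick_first":{"shuffleAddressList":true}}]}`),
	)
}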
@@ -35,45 +40,84 @@ echo "go install cmd/protoc-gen-go-grpc" echo "git clone https://github.com/grpc/grpc-proto" git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto +echo "git clone https://github.com/protocolbuffers/protobuf" +git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf + +# Pull in code.proto as a proto dependency mkdir -p ${WORKDIR}/googleapis/google/rpc echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto mkdir -p ${WORKDIR}/out -SOURCES=( - ${WORKDIR}/googleapis/google/rpc/code.proto +# Generates sources without the embed requirement +LEGACY_SOURCES=( ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto + ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto + ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto + profiling/proto/service.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto +) + +# Generates only the new gRPC Service symbols +SOURCES=( + $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$') ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto - ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto - ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto - ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto - $(git ls-files --exclude-standard --cached --others "*.proto") + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto + ${WORKDIR}/grpc-proto/grpc/testing/*.proto + ${WORKDIR}/grpc-proto/grpc/core/*.proto ) -OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config + +# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an +# import path of 'bar' in the generated code when 'foo.proto' is imported in +# one of the sources. +# +# Note that the protos listed here are all for testing purposes. All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). +OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing + for src in ${SOURCES[@]}; do echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},requireUnimplementedServers=false:${WORKDIR}/out \ + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \ + -I"." 
\
+		-I${WORKDIR}/grpc-proto \
+		-I${WORKDIR}/googleapis \
+		-I${WORKDIR}/protobuf/src \
+		${src}
+done
+
+for src in ${LEGACY_SOURCES[@]}; do
+	echo "protoc ${src}"
+	protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \
 		-I"." \
 		-I${WORKDIR}/grpc-proto \
 		-I${WORKDIR}/googleapis \
+		-I${WORKDIR}/protobuf/src \
 		${src}
 done
 
 # The go_package option in grpc/lookup/v1/rls.proto doesn't match the
 # current location. Move it into the right place.
-mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1
-mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1
-
-# grpc_testingv3/testv3.pb.go is not re-generated because it was
-# intentionally generated by an older version of protoc-gen-go.
-rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go
+mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
+mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
 
-# grpc/service_config/service_config.proto does not have a go_package option.
-cp ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config
+# grpc_testing_not_regenerate/*.pb.go are not re-generated,
+# see grpc_testing_not_regenerate/README.md for details.
+rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go
 
 cp -R ${WORKDIR}/out/google.golang.org/grpc/* .
diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/resolver/map.go b/prometheus-to-sd/vendor/google.golang.org/grpc/resolver/map.go
new file mode 100644
index 000000000..efcb7f3ef
--- /dev/null
+++ b/prometheus-to-sd/vendor/google.golang.org/grpc/resolver/map.go
@@ -0,0 +1,138 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package resolver
+
+type addressMapEntry struct {
+	addr  Address
+	value interface{}
+}
+
+// AddressMap is a map of addresses to arbitrary values taking into account
+// Attributes. BalancerAttributes are ignored, as are Metadata and Type.
+// Multiple accesses may not be performed concurrently. Must be created via
+// NewAddressMap; do not construct directly.
+type AddressMap struct {
+	// The underlying map is keyed by an Address with fields that we don't care
+	// about being set to their zero values. The only fields that we care about
+	// are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
+	// distinguish between addresses with same `Addr` and `ServerName`, but
+	// different `Attributes`, we cannot store the `Attributes` in the map key.
+	//
+	// The comparison operation for structs works as follows:
+	// Struct values are comparable if all their fields are comparable. Two
+	// struct values are equal if their corresponding non-blank fields are equal.
+ // + // The value type of the map contains a slice of addresses which match the key + // in their `Addr` and `ServerName` fields and contain the corresponding value + // associated with them. + m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { + return Address{Addr: addr.Addr, ServerName: addr.ServerName} +} + +type addressMapEntryList []*addressMapEntry + +// NewAddressMap creates a new AddressMap. +func NewAddressMap() *AddressMap { + return &AddressMap{m: make(map[Address]addressMapEntryList)} +} + +// find returns the index of addr in the addressMapEntry slice, or -1 if not +// present. +func (l addressMapEntryList) find(addr Address) int { + for i, entry := range l { + // Attributes are the only thing to match on here, since `Addr` and + // `ServerName` are already equal. + if entry.addr.Attributes.Equal(addr.Attributes) { + return i + } + } + return -1 +} + +// Get returns the value for the address in the map, if present. +func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + return entryList[entry].value, true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (a *AddressMap) Set(addr Address, value interface{}) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + entryList[entry].value = value + return + } + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) +} + +// Delete removes addr from the map. +func (a *AddressMap) Delete(addr Address) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + entry := entryList.find(addr) + if entry == -1 { + return + } + if len(entryList) == 1 { + entryList = nil + } else { + copy(entryList[entry:], entryList[entry+1:]) + entryList = entryList[:len(entryList)-1] + } + a.m[addrKey] = entryList +} + +// Len returns the number of entries in the map. +func (a *AddressMap) Len() int { + ret := 0 + for _, entryList := range a.m { + ret += len(entryList) + } + return ret +} + +// Keys returns a slice of all current map keys. +func (a *AddressMap) Keys() []Address { + ret := make([]Address, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.addr) + } + } + return ret +} + +// Values returns a slice of all current map values. +func (a *AddressMap) Values() []interface{} { + ret := make([]interface{}, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/resolver/resolver.go b/prometheus-to-sd/vendor/google.golang.org/grpc/resolver/resolver.go index 379275a2d..353c10b69 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,7 +22,10 @@ package resolver import ( "context" + "fmt" "net" + "net/url" + "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" @@ -38,8 +41,9 @@ var ( // TODO(bar) install dns resolver in init(){}. -// Register registers the resolver builder to the resolver map. b.Scheme will be -// used as the scheme registered with this builder. +// Register registers the resolver builder to the resolver map. b.Scheme will +// be used as the scheme registered with this builder. 
The registry is case +// sensitive, and schemes should not contain any uppercase characters. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. If multiple Resolvers are @@ -93,7 +97,11 @@ const ( ) // Address represents a server the client connects to. -// This is the EXPERIMENTAL API and may be changed or extended in the future. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type Address struct { // Addr is the server address on which a connection will be established. Addr string @@ -112,9 +120,14 @@ type Address struct { ServerName string // Attributes contains arbitrary data about this address intended for - // consumption by the load balancing policy. + // consumption by the SubConn. Attributes *attributes.Attributes + // BalancerAttributes contains arbitrary data about this address intended + // for consumption by the LB policy. These attributes do not affect SubConn + // creation, connection establishment, handshaking, etc. + BalancerAttributes *attributes.Attributes + // Type is the type of this address. // // Deprecated: use Attributes instead. @@ -127,6 +140,30 @@ type Address struct { Metadata interface{} } +// Equal returns whether a and o are identical. Metadata is compared directly, +// not with any recursive introspection. +func (a Address) Equal(o Address) bool { + return a.Addr == o.Addr && a.ServerName == o.ServerName && + a.Attributes.Equal(o.Attributes) && + a.BalancerAttributes.Equal(o.BalancerAttributes) && + a.Type == o.Type && a.Metadata == o.Metadata +} + +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() +} + // BuildOptions includes additional information for the builder to create // the resolver. type BuildOptions struct { @@ -177,7 +214,16 @@ type State struct { // gRPC to add new methods to this interface. type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. - UpdateState(State) + // + // If an error is returned, the resolver should try to resolve the + // target again. The resolver should use a backoff timer to prevent + // overloading the server with requests. If a resolver is certain that + // reresolving will not change the result, e.g. because it is + // a watch-based resolver, returned errors can be ignored. + // + // If the resolved State is the same as the last reported one, calling + // UpdateState can be omitted. + UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an // error. The ClientConn will notify the load balancer and begin calling // ResolveNow on the Resolver with exponential backoff. @@ -200,25 +246,51 @@ type ClientConn interface { // Target represents a target for gRPC, as specified in: // https://github.com/grpc/grpc/blob/master/doc/naming.md. -// It is parsed from the target string that gets passed into Dial or DialContext by the user. And -// grpc passes it to the resolver and the balancer. 
+// It is parsed from the target string that gets passed into Dial or DialContext
+// by the user. And gRPC passes it to the resolver and the balancer.
 //
-// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will
-// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed
-// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
+// If the target follows the naming spec, and the parsed scheme is registered
+// with gRPC, we will parse the target string according to the spec. If the
+// target does not contain a scheme or if the parsed scheme is not registered
+// (i.e. no corresponding resolver available to resolve the endpoint), we will
+// apply the default scheme, and will attempt to reparse it.
 //
-// If the target does not contain a scheme, we will apply the default scheme, and set the Target to
-// be the full target string. e.g. "foo.bar" will be parsed into
-// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}.
+// Examples:
 //
-// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the
-// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target
-// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into
-// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}.
+//   - "dns://some_authority/foo.bar"
+//     Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
+//   - "foo.bar"
+//     Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
+//   - "unknown_scheme://authority/endpoint"
+//     Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
 type Target struct {
-	Scheme    string
+	// Deprecated: use URL.Scheme instead.
+	Scheme string
+	// Deprecated: use URL.Host instead.
 	Authority string
-	Endpoint  string
+	// URL contains the parsed dial target with an optional default scheme added
+	// to it if the original dial target contained no scheme or contained an
+	// unregistered scheme. Any query params specified in the original dial
+	// target can be accessed from here.
+	URL url.URL
+}
+
+// Endpoint retrieves endpoint without leading "/" from either `URL.Path`
+// or `URL.Opaque`. The latter is used when the former is empty.
+func (t Target) Endpoint() string {
+	endpoint := t.URL.Path
+	if endpoint == "" {
+		endpoint = t.URL.Opaque
+	}
+	// For targets of the form "[scheme]://[authority]/endpoint", the endpoint
+	// value returned from url.Parse() contains a leading "/". Although this is
+	// in accordance with RFC 3986, we do not want to break existing resolver
+	// implementations which expect the endpoint without the leading "/". So, we
+	// end up stripping the leading "/" here. But this will result in an
+	// incorrect parsing for something like "unix:///path/to/socket". Since we
+	// own the "unix" resolver, we can work around it in the unix resolver by
+	// using the `URL` field.
+	return strings.TrimPrefix(endpoint, "/")
 }
 
 // Builder creates a resolver that will be used to watch name resolution updates.
@@ -228,8 +300,10 @@ type Builder interface {
 	// gRPC dial calls Build synchronously, and fails if the returned error is
 	// not nil.
 	Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
-	// Scheme returns the scheme supported by this resolver.
-	// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
+ // Scheme returns the scheme supported by this resolver. Scheme is defined + // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned + // string should not contain uppercase characters, as they will not match + // the parsed target's scheme as defined in RFC 3986. Scheme() string } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/prometheus-to-sd/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 265002a75..b408b3688 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,184 +19,204 @@ package grpc import ( - "fmt" + "context" "strings" "sync" - "time" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) +// resolverStateUpdater wraps the single method used by ccResolverWrapper to +// report a state update from the actual resolver implementation. +type resolverStateUpdater interface { + updateResolverState(s resolver.State, err error) error +} + // ccResolverWrapper is a wrapper on top of cc for resolvers. // It implements resolver.ClientConn interface. type ccResolverWrapper struct { - cc *ClientConn - resolverMu sync.Mutex - resolver resolver.Resolver - done *grpcsync.Event - curState resolver.State - - pollingMu sync.Mutex - polling chan struct{} + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc resolverStateUpdater + channelzID *channelz.Identifier + ignoreServiceConfig bool + opts ccResolverWrapperOpts + serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. + serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). + + // All incoming (resolver --> gRPC) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled on the serializer. + // Fields accessed *only* in these serializer callbacks, can therefore be + // accessed without a mutex. + curState resolver.State + + // mu guards access to the below fields. + mu sync.Mutex + closed bool + resolver resolver.Resolver // Accessed only from outgoing calls. +} + +// ccResolverWrapperOpts wraps the arguments to be passed when creating a new +// ccResolverWrapper. +type ccResolverWrapperOpts struct { + target resolver.Target // User specified dial target to resolve. + builder resolver.Builder // Resolver builder to use. + bOpts resolver.BuildOptions // Resolver build options to use. + channelzID *channelz.Identifier // Channelz identifier for the channel. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and // returns a ccResolverWrapper object which wraps the newly built resolver. 
-func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { +func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { + ctx, cancel := context.WithCancel(context.Background()) ccr := &ccResolverWrapper{ - cc: cc, - done: grpcsync.NewEvent(), - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - rbo := resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - } - - var err error - // We need to hold the lock here while we assign to the ccr.resolver field - // to guard against a data race caused by the following code path, - // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up - // accessing ccr.resolver which is being assigned here. - ccr.resolverMu.Lock() - defer ccr.resolverMu.Unlock() - ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + cc: cc, + channelzID: opts.channelzID, + ignoreServiceConfig: opts.bOpts.DisableServiceConfig, + opts: opts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + + // Cannot hold the lock at build time because the resolver can send an + // update or error inline and these incoming calls grab the lock to schedule + // a callback in the serializer. + r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) if err != nil { + cancel() return nil, err } + + // Any error reported by the resolver at build time that leads to a + // re-resolution request from the balancer is dropped by grpc until we + // return from this function. So, we don't have to handle pending resolveNow + // requests here. + ccr.mu.Lock() + ccr.resolver = r + ccr.mu.Unlock() + return ccr, nil } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.resolverMu.Lock() - if !ccr.done.HasFired() { - ccr.resolver.ResolveNow(o) + ccr.mu.Lock() + defer ccr.mu.Unlock() + + // ccr.resolver field is set only after the call to Build() returns. But in + // the process of building, the resolver may send an error update which when + // propagated to the balancer may result in a re-resolution request. + if ccr.closed || ccr.resolver == nil { + return } - ccr.resolverMu.Unlock() + ccr.resolver.ResolveNow(o) } func (ccr *ccResolverWrapper) close() { - ccr.resolverMu.Lock() - ccr.resolver.Close() - ccr.done.Fire() - ccr.resolverMu.Unlock() -} - -// poll begins or ends asynchronous polling of the resolver based on whether -// err is ErrBadResolverState. -func (ccr *ccResolverWrapper) poll(err error) { - ccr.pollingMu.Lock() - defer ccr.pollingMu.Unlock() - if err != balancer.ErrBadResolverState { - // stop polling - if ccr.polling != nil { - close(ccr.polling) - ccr.polling = nil - } - return - } - if ccr.polling != nil { - // already polling + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() return } - p := make(chan struct{}) - ccr.polling = p - go func() { - for i := 0; ; i++ { - ccr.resolveNow(resolver.ResolveNowOptions{}) - t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i)) - select { - case <-p: - t.Stop() - return - case <-ccr.done.Done(): - // Resolver has been closed. - t.Stop() - return - case <-t.C: - select { - case <-p: - return - default: - } - // Timer expired; re-resolve. 
- } - } - }() + + channelz.Info(logger, ccr.channelzID, "Closing the name resolver") + + // Close the serializer to ensure that no more calls from the resolver are + // handled, before actually closing the resolver. + ccr.serializerCancel() + ccr.closed = true + r := ccr.resolver + ccr.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-ccr.serializer.Done + + // Spawn a goroutine to close the resolver (since it may block trying to + // clean up all allocated resources) and return early. + go r.Close() } -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { +// serializerScheduleLocked is a convenience method to schedule a function to be +// run on the serializer while holding ccr.mu. +func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { + ccr.mu.Lock() + ccr.serializer.Schedule(f) + ccr.mu.Unlock() +} + +// UpdateState is called by resolver implementations to report new state to gRPC, +// which includes addresses and service config. +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + errCh := make(chan error, 1) + ok := ccr.serializer.Schedule(func(context.Context) { ccr.addChannelzTraceEvent(s) + ccr.curState = s + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + errCh <- balancer.ErrBadResolverState + return + } + errCh <- nil + }) + if !ok { + // The only time when Schedule() fails to add the callback to the + // serializer is when the serializer is closed, and this happens only + // when the resolver wrapper is closed. + return nil } - ccr.curState = s - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + return <-errCh } +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. func (ccr *ccResolverWrapper) ReportError(err error) { - if ccr.done.HasFired() { - return - } - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverState(resolver.State{}, err) + }) } -// NewAddress is called by the resolver implementation to send addresses to gRPC. +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { + ccr.serializerScheduleLocked(func(_ context.Context) { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } - ccr.curState.Addresses = addrs - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState, nil) + }) } // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC.
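The wrapper above funnels every resolver-to-gRPC call through a callback serializer, so the callbacks run one at a time and can touch curState without a mutex. A minimal, self-contained sketch of that pattern (the real implementation is grpcsync.CallbackSerializer; this is only the idea, simplified so pending callbacks may be dropped on cancel):

package main

import (
	"context"
	"fmt"
)

// serializer executes scheduled callbacks sequentially on one goroutine.
type serializer struct {
	callbacks chan func(context.Context)
	done      chan struct{}
}

func newSerializer(ctx context.Context) *serializer {
	s := &serializer{
		callbacks: make(chan func(context.Context), 16),
		done:      make(chan struct{}),
	}
	go func() {
		defer close(s.done)
		for {
			select {
			case cb := <-s.callbacks:
				cb(ctx) // callbacks run mutually exclusively, in order
			case <-ctx.Done():
				return
			}
		}
	}()
	return s
}

// schedule reports false once the serializer has been shut down.
func (s *serializer) schedule(cb func(context.Context)) bool {
	select {
	case s.callbacks <- cb:
		return true
	case <-s.done:
		return false
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	s := newSerializer(ctx)
	ran := make(chan struct{})
	s.schedule(func(context.Context) { fmt.Println("state update 1") })
	s.schedule(func(context.Context) { fmt.Println("state update 2") })
	s.schedule(func(context.Context) { close(ran) })
	<-ran // all prior callbacks have executed, in order
	cancel()
	<-s.done
}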
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) - if ccr.cc.dopts.disableServiceConfig { - channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - ccr.poll(balancer.ErrBadResolverState) - return - } - if channelz.IsOn() { + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) + if ccr.ignoreServiceConfig { + channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - } - ccr.curState.ServiceConfig = scpr - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + ccr.curState.ServiceConfig = scpr + ccr.cc.updateResolverState(ccr.curState, nil) + }) } +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { return parseServiceConfig(scJSON) } +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { var updates []string var oldSC, newSC *ServiceConfig @@ -215,8 +235,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtINFO, - }) + channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/rpc_util.go b/prometheus-to-sd/vendor/google.golang.org/grpc/rpc_util.go index 8644b8a7d..2030736a3 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/rpc_util.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/rpc_util.go @@ -25,9 +25,7 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "math" - "net/url" "strings" "sync" "time" @@ -78,7 +76,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { return &gzipCompressor{ pool: sync.Pool{ New: func() interface{} { - w, err := gzip.NewWriterLevel(ioutil.Discard, level) + w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) } @@ -144,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { z.Close() d.pool.Put(z) }() - return ioutil.ReadAll(z) + return io.ReadAll(z) } func (d *gzipDecompressor) Type() string { @@ -161,6 +159,7 @@ type callInfo struct { contentSubtype string codec baseCodec maxRetryRPCBufferSize int + onFinish []func(err error) } func defaultCallInfo() *callInfo { @@ -198,7 +197,11 @@ func Header(md *metadata.MD) CallOption { // HeaderCallOption is a 
CallOption for collecting response header metadata. // The metadata field will be populated *after* the RPC completes. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type HeaderCallOption struct { HeaderAddr *metadata.MD } @@ -216,7 +219,11 @@ func Trailer(md *metadata.MD) CallOption { // TrailerCallOption is a CallOption for collecting response trailer metadata. // The metadata field will be populated *after* the RPC completes. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type TrailerCallOption struct { TrailerAddr *metadata.MD } @@ -234,7 +241,11 @@ func Peer(p *peer.Peer) CallOption { // PeerCallOption is a CallOption for collecting the identity of the remote // peer. The peer field will be populated *after* the RPC completes. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type PeerCallOption struct { PeerAddr *peer.Peer } @@ -247,7 +258,8 @@ func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { } // WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false, the RPC will fail +// connections or unreachable servers. If waitForReady is false and the +// connection is in the TRANSIENT_FAILURE state, the RPC will fail // immediately. Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will // retry the call if it fails due to a transient error. gRPC will not retry if @@ -269,7 +281,11 @@ func FailFast(failFast bool) CallOption { // FailFastCallOption is a CallOption for indicating whether an RPC should fail // fast or not. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type FailFastCallOption struct { FailFast bool } @@ -280,15 +296,55 @@ func (o FailFastCallOption) before(c *callInfo) error { } func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +// OnFinish returns a CallOption that configures a callback to be called when +// the call completes. The error passed to the callback is the status of the +// RPC, and may be nil. The onFinish callback provided will only be called once +// by gRPC. This is mainly intended for use by streaming interceptors, to be +// notified when the RPC completes along with information about the status of +// the RPC. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func OnFinish(onFinish func(err error)) CallOption { + return OnFinishCallOption{ + OnFinish: onFinish, + } +} + +// OnFinishCallOption is a CallOption that indicates a callback to be called when +// the call completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type OnFinishCallOption struct { + OnFinish func(error) +} + +func (o OnFinishCallOption) before(c *callInfo) error { + c.onFinish = append(c.onFinish, o.OnFinish) + return nil +} + +func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} + // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can receive.
If this is not set, gRPC uses the default +// 4MB. func MaxCallRecvMsgSize(bytes int) CallOption { return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can receive. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type MaxRecvMsgSizeCallOption struct { MaxRecvMsgSize int } @@ -300,14 +356,19 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can send. +// in bytes the client can send. If this is not set, gRPC uses the default +// `math.MaxInt32`. func MaxCallSendMsgSize(bytes int) CallOption { return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can send. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type MaxSendMsgSizeCallOption struct { MaxSendMsgSize int } @@ -326,7 +387,11 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { // PerRPCCredsCallOption is a CallOption that indicates the per-RPC // credentials to use for the call. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type PerRPCCredsCallOption struct { Creds credentials.PerRPCCredentials } @@ -341,13 +406,20 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} // sending the request. If WithCompressor is also set, UseCompressor has // higher priority. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func UseCompressor(name string) CallOption { return CompressorCallOption{CompressorType: name} } // CompressorCallOption is a CallOption that indicates the compressor to use. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type CompressorCallOption struct { CompressorType string } @@ -380,7 +452,11 @@ func CallContentSubtype(contentSubtype string) CallOption { // ContentSubtypeCallOption is a CallOption that indicates the content-subtype // used for marshaling messages. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type ContentSubtypeCallOption struct { ContentSubtype string } @@ -391,9 +467,10 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { } func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} -// ForceCodec returns a CallOption that will set the given Codec to be -// used for all request and response messages for a call. The result of calling -// String() will be used as the content-subtype in a case-insensitive manner. +// ForceCodec returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. 
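A hedged usage sketch tying several of the call options above together; the method path and the untyped request/reply are placeholders standing in for generated stubs, not part of this patch:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip"
)

// callWithOptions applies the options documented above to a raw Invoke call.
func callWithOptions(ctx context.Context, cc *grpc.ClientConn, req, reply interface{}) error {
	return cc.Invoke(ctx, "/example.Greeter/SayHello", req, reply,
		grpc.WaitForReady(true),               // block through TRANSIENT_FAILURE instead of failing fast
		grpc.MaxCallRecvMsgSize(16*1024*1024), // raise the default 4MB receive cap
		grpc.UseCompressor(gzip.Name),         // compress requests with the registered gzip compressor
		grpc.OnFinish(func(err error) {        // invoked exactly once with the RPC's final status
			log.Printf("RPC finished: %v", err)
		}),
	)
}

func main() {}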
// // See Content-Type on // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for @@ -404,7 +481,10 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. // -// This is an EXPERIMENTAL API. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func ForceCodec(codec encoding.Codec) CallOption { return ForceCodecCallOption{Codec: codec} } @@ -412,7 +492,10 @@ func ForceCodec(codec encoding.Codec) CallOption { // ForceCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// This is an EXPERIMENTAL API. +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type ForceCodecCallOption struct { Codec encoding.Codec } @@ -434,7 +517,10 @@ func CallCustomCodec(codec Codec) CallOption { // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// This is an EXPERIMENTAL API. +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type CustomCodecCallOption struct { Codec Codec } @@ -448,14 +534,21 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func MaxRetryRPCBufferSize(bytes int) CallOption { return MaxRetryRPCBufferSizeCallOption{bytes} } // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of // memory to be used for caching this RPC for retry purposes. -// This is an EXPERIMENTAL API. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type MaxRetryRPCBufferSizeCallOption struct { MaxRetryRPCBufferSize int } @@ -492,10 +585,11 @@ type parser struct { // format. The caller owns the returned msg memory. // // If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. @@ -600,12 +694,13 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ - Client: client, - Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - SentTime: t, + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + CompressedLength: len(payload), + SentTime: t, } } @@ -626,7 +721,7 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool } type payloadInfo struct { - wireLength int // The compressed length got from wire. + compressedLength int // The compressed length got from wire. 
uncompressedBytes []byte } @@ -636,7 +731,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, err } if payInfo != nil { - payInfo.wireLength = len(d) + payInfo.compressedLength = len(d) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { @@ -654,15 +749,13 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei d, size, err = decompress(compressor, d, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + } + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } - } else { - size = len(d) - } - if size > maxReceiveMessageSize { - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize) } return d, nil } @@ -691,7 +784,7 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize } // Read from LimitReader with limit max+1. So if the underlying // reader is over limit, the result will be bigger than max. - d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return d, len(d), err } @@ -704,7 +797,7 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf return err } if err := c.Unmarshal(d, m); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { payInfo.uncompressedBytes = d @@ -773,33 +866,45 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { // toRPCErr converts an error into an error from the status package. func toRPCErr(err error) error { - if err == nil || err == io.EOF { + switch err { + case nil, io.EOF: return err - } - if err == io.ErrUnexpectedEOF { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } - if _, ok := status.FromError(err); ok { - return err - } + switch e := err.(type) { case transport.ConnectionError: return status.Error(codes.Unavailable, e.Desc) - default: - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - } + case *transport.NewStreamError: + return toRPCErr(e.Err) + } + + if _, ok := status.FromError(err); ok { + return err } + return status.Error(codes.Unknown, err.Error()) } // setCallInfoCodec should only be called after CallOptions have been applied. func setCallInfoCodec(c *callInfo) error { if c.codec != nil { - // codec was already set by a CallOption; use it. + // codec was already set by a CallOption; use it, but set the content + // subtype if it is not set. 
+ if c.contentSubtype == "" { + // c.codec is a baseCodec to hide the difference between grpc.Codec and + // encoding.Codec (Name vs. String method name). We only support + // setting content subtype from encoding.Codec to avoid a behavior + // change with the deprecated version. + if ec, ok := c.codec.(encoding.Codec); ok { + c.contentSubtype = strings.ToLower(ec.Name()) + } + } return nil } @@ -817,40 +922,6 @@ func setCallInfoCodec(c *callInfo) error { return nil } -// parseDialTarget returns the network and address to pass to dialer -func parseDialTarget(target string) (net string, addr string) { - net = "tcp" - - m1 := strings.Index(target, ":") - m2 := strings.Index(target, ":/") - - // handle unix:addr which will fail with url.Parse - if m1 >= 0 && m2 < 0 { - if n := target[0:m1]; n == "unix" { - net = n - addr = target[m1+1:] - return net, addr - } - } - if m2 >= 0 { - t, err := url.Parse(target) - if err != nil { - return net, target - } - scheme := t.Scheme - addr = t.Path - if scheme == "unix" { - net = scheme - if addr == "" { - addr = t.Host - } - return net, addr - } - } - - return net, target -} - // channelzData is used to store channelz related data for ClientConn, addrConn and Server. // These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic // operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. @@ -866,10 +937,9 @@ type channelzData struct { // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest -// support package version is 6. +// support package version is 7. // -// Older versions are kept for compatibility. They may be removed if -// compatibility cannot be maintained. +// Older versions are kept for compatibility. // // These constants should not be referenced from any other code. const ( @@ -877,6 +947,7 @@ const ( SupportPackageIsVersion4 = true SupportPackageIsVersion5 = true SupportPackageIsVersion6 = true + SupportPackageIsVersion7 = true ) const grpcUA = "grpc-go/" + Version diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/server.go b/prometheus-to-sd/vendor/google.golang.org/grpc/server.go index 1b56cc2d1..8869cc906 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/server.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/server.go @@ -40,10 +40,11 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -56,8 +57,32 @@ import ( const ( defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 defaultServerMaxSendMessageSize = math.MaxInt32 + + // Server transports are tracked in a map which is keyed on listener + // address. For regular gRPC traffic, connections are accepted in Serve() + // through a call to Accept(), and we use the actual listener address as key + // when we add it to the map. But for connections received through + // ServeHTTP(), we do not have a listener and hence use this dummy value. 
+ listenerAddressForServeHTTP = "listenerAddressForServeHTTP" ) +func init() { + internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { + return srv.opts.creds + } + internal.DrainServerTransports = func(srv *Server, addr string) { + srv.drainServerTransports(addr) + } + internal.AddGlobalServerOptions = func(opt ...ServerOption) { + globalServerOptions = append(globalServerOptions, opt...) + } + internal.ClearGlobalServerOptions = func() { + globalServerOptions = nil + } + internal.BinaryLogger = binaryLogger + internal.JoinServerOptions = newJoinServerOption +} + var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") @@ -80,43 +105,41 @@ type ServiceDesc struct { Metadata interface{} } -// service consists of the information of the server serving this service and -// the methods in this service. -type service struct { - server interface{} // the server for service methods - md map[string]*MethodDesc - sd map[string]*StreamDesc - mdata interface{} -} - -type serverWorkerData struct { - st transport.ServerTransport - wg *sync.WaitGroup - stream *transport.Stream +// serviceInfo wraps information about a service. It is very similar to +// ServiceDesc and is constructed from it for internal purposes. +type serviceInfo struct { + // Contains the implementation for the methods in this service. + serviceImpl interface{} + methods map[string]*MethodDesc + streams map[string]*StreamDesc + mdata interface{} } // Server is a gRPC server to serve RPC requests. type Server struct { opts serverOptions - mu sync.Mutex // guards following - lis map[net.Listener]bool - conns map[transport.ServerTransport]bool - serve bool - drain bool - cv *sync.Cond // signaled when connections close for GracefulStop - m map[string]*service // service name -> service info - events trace.EventLog + mu sync.Mutex // guards following + lis map[net.Listener]bool + // conns contains all active server transports. It is a map keyed on a + // listener address with the value being the set of active transports + // belonging to that listener. 
+ conns map[string]map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + services map[string]*serviceInfo // service name -> service info + events trace.EventLog quit *grpcsync.Event done *grpcsync.Event channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData - serverWorkerChannels []chan *serverWorkerData + serverWorkerChannel chan func() } type serverOptions struct { @@ -128,8 +151,9 @@ type serverOptions struct { streamInt StreamServerInterceptor chainUnaryInts []UnaryServerInterceptor chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger inTapHandle tap.ServerInHandle - statsHandler stats.Handler + statsHandlers []stats.Handler maxConcurrentStreams uint32 maxReceiveMessageSize int maxSendMessageSize int @@ -147,12 +171,14 @@ type serverOptions struct { } var defaultServerOptions = serverOptions{ + maxConcurrentStreams: math.MaxUint32, maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, } +var globalServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -162,7 +188,10 @@ type ServerOption interface { // EmptyServerOption does not alter the server configuration. It can be embedded // in another structure to build custom server options. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type EmptyServerOption struct{} func (EmptyServerOption) apply(*serverOptions) {} @@ -183,10 +212,27 @@ func newFuncServerOption(f func(*serverOptions)) *funcServerOption { } } -// WriteBufferSize determines how much data can be batched before doing a write on the wire. -// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. The default value for this buffer is +// 32KB. Zero or negative values will disable the write buffer such that each +// write will be on underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { @@ -194,11 +240,10 @@ func WriteBufferSize(s int) ServerOption { }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -// The default value for this buffer is 32KB. 
-// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. The default value for this +// buffer is 32KB. Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s @@ -255,6 +300,35 @@ func CustomCodec(codec Codec) ServerOption { }) } +// ForceServerCodec returns a ServerOption that sets a codec for message +// marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered +// with RegisterCodec. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between encoding.Codec +// and content-subtype. +// +// This function is provided for advanced users; prefer to register codecs +// using encoding.RegisterCodec. +// The server will automatically use registered codecs based on the incoming +// requests' headers. See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodec(codec encoding.Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + // RPCCompressor returns a ServerOption that sets a compressor for outbound // messages. For backward compatibility, all outbound messages will be sent // using this compressor, regardless of incoming message compression. By @@ -308,6 +382,9 @@ func MaxSendMsgSize(m int) ServerOption { // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { + if n == 0 { + n = math.MaxUint32 + } return newFuncServerOption(func(o *serverOptions) { o.maxConcurrentStreams = n }) @@ -365,6 +442,11 @@ func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOptio // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func InTapHandle(h tap.ServerInHandle) ServerOption { return newFuncServerOption(func(o *serverOptions) { if o.inTapHandle != nil { @@ -377,7 +459,21 @@ func InTapHandle(h tap.ServerInHandle) ServerOption { // StatsHandler returns a ServerOption that sets the stats handler for the server. func StatsHandler(h stats.Handler) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.statsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.statsHandlers = append(o.statsHandlers, h) + }) +} + +// binaryLogger returns a ServerOption that can set the binary logger for the +// server. 
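A short sketch of how the server options above compose; the values are illustrative only, not recommendations:

package main

import "google.golang.org/grpc"

func newTunedServer() *grpc.Server {
	return grpc.NewServer(
		grpc.WriteBufferSize(64*1024),    // batch up to 64KB before each write syscall
		grpc.ReadBufferSize(0),           // zero or negative disables the read buffer
		grpc.MaxRecvMsgSize(8*1024*1024), // per-message receive limit
		grpc.MaxConcurrentStreams(0),     // 0 is now promoted to math.MaxUint32, i.e. unlimited
	)
}

func main() { _ = newTunedServer() }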
+func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl }) } @@ -404,7 +500,10 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { // new connections. If this is not set, the default is 120 seconds. A zero or // negative value will result in an immediate timeout. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func ConnectionTimeout(d time.Duration) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.connectionTimeout = d @@ -422,7 +521,10 @@ func MaxHeaderListSize(s uint32) ServerOption { // HeaderTableSize returns a ServerOption that sets the size of dynamic // header table for stream. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func HeaderTableSize(s uint32) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.headerTableSize = &s @@ -434,7 +536,10 @@ func HeaderTableSize(s uint32) ServerOption { // zero (default) will disable workers and spawn a new goroutine for each // stream. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func NumStreamWorkers(numServerWorkers uint32) ServerOption { // TODO: If/when this API gets stabilized (i.e. stream workers become the // only way streams are processed), change the behavior of the zero value to @@ -453,57 +558,53 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { const serverWorkerResetThreshold = 1 << 16 // serverWorkers blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows different requests to be +// data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). // // [1] https://github.com/golang/go/issues/18138 -func (s *Server) serverWorker(ch chan *serverWorkerData) { - // To make sure all server workers don't reset at the same time, choose a - // random number of iterations before resetting. - threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) - for completed := 0; completed < threshold; completed++ { - data, ok := <-ch +func (s *Server) serverWorker() { + for completed := 0; completed < serverWorkerResetThreshold; completed++ { + f, ok := <-s.serverWorkerChannel if !ok { return } - s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) - data.wg.Done() + f() } - go s.serverWorker(ch) + go s.serverWorker() } -// initServerWorkers creates worker goroutines and channels to process incoming +// initServerWorkers creates worker goroutines and a channel to process incoming // connections to reduce the time spent overall on runtime.morestack. 
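The worker scheme above is a small fixed pool that recycles its goroutines to shed grown stacks. A standalone sketch of the pattern (the pool type and demo values are illustrative, not grpc internals):

package main

import (
	"fmt"
	"sync"
)

const resetThreshold = 1 << 16 // after this many tasks the worker respawns itself

type workerPool struct {
	tasks chan func()
}

func newWorkerPool(n int) *workerPool {
	p := &workerPool{tasks: make(chan func())}
	for i := 0; i < n; i++ {
		go p.worker()
	}
	return p
}

// worker runs tasks until the channel closes; after resetThreshold tasks it
// replaces itself with a fresh goroutine, releasing any stack grown by a
// large request (the runtime.morestack problem referenced above).
func (p *workerPool) worker() {
	for completed := 0; completed < resetThreshold; completed++ {
		f, ok := <-p.tasks
		if !ok {
			return
		}
		f()
	}
	go p.worker()
}

func main() {
	p := newWorkerPool(4)
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		i := i
		wg.Add(1)
		p.tasks <- func() { defer wg.Done(); fmt.Println("handled stream", i) }
	}
	wg.Wait()
	close(p.tasks)
}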
func (s *Server) initServerWorkers() { - s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) + s.serverWorkerChannel = make(chan func()) for i := uint32(0); i < s.opts.numServerWorkers; i++ { - s.serverWorkerChannels[i] = make(chan *serverWorkerData) - go s.serverWorker(s.serverWorkerChannels[i]) + go s.serverWorker() } } func (s *Server) stopServerWorkers() { - for i := uint32(0); i < s.opts.numServerWorkers; i++ { - close(s.serverWorkerChannels[i]) - } + close(s.serverWorkerChannel) } // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions + for _, o := range globalServerOptions { + o.apply(&opts) + } for _, o := range opt { o.apply(&opts) } s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[transport.ServerTransport]bool), - m: make(map[string]*service), - quit: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - czData: new(channelzData), + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[string]map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + czData: new(channelzData), } chainUnaryServerInterceptors(s) chainStreamServerInterceptors(s) @@ -517,9 +618,8 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") return s } @@ -539,14 +639,29 @@ func (s *Server) errorf(format string, a ...interface{}) { } } +// ServiceRegistrar wraps a single method that supports service registration. It +// enables users to pass concrete types other than grpc.Server to the service +// registration methods exported by the IDL generated code. +type ServiceRegistrar interface { + // RegisterService registers a service and its implementation to the + // concrete type implementing this interface. It may not be called + // once the server has started serving. + // desc describes the service and its methods and handlers. impl is the + // service implementation which is passed to the method handlers. + RegisterService(desc *ServiceDesc, impl interface{}) +} + // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before -// invoking Serve. +// invoking Serve. If ss is non-nil (for legacy code), its type is checked to +// ensure it implements sd.HandlerType. 
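Any type implementing ServiceRegistrar can now receive generated RegisterXxxServer calls, not just *grpc.Server. A minimal sketch of a pass-through registrar (the logging wrapper is hypothetical):

package main

import (
	"fmt"

	"google.golang.org/grpc"
)

// loggingRegistrar forwards registrations to a real *grpc.Server while
// logging each service name.
type loggingRegistrar struct {
	inner *grpc.Server
}

func (r *loggingRegistrar) RegisterService(desc *grpc.ServiceDesc, impl interface{}) {
	fmt.Println("registering service:", desc.ServiceName)
	r.inner.RegisterService(desc, impl)
}

// Compile-time check that loggingRegistrar satisfies grpc.ServiceRegistrar,
// so generated helpers of the form RegisterXxxServer(s grpc.ServiceRegistrar,
// srv XxxServer) accept it.
var _ grpc.ServiceRegistrar = (*loggingRegistrar)(nil)

func main() {
	_ = &loggingRegistrar{inner: grpc.NewServer()}
}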
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { - ht := reflect.TypeOf(sd.HandlerType).Elem() - st := reflect.TypeOf(ss) - if !st.Implements(ht) { - logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + if ss != nil { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } } s.register(sd, ss) } @@ -558,24 +673,24 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) { if s.serve { logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) } - if _, ok := s.m[sd.ServiceName]; ok { + if _, ok := s.services[sd.ServiceName]; ok { logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) } - srv := &service{ - server: ss, - md: make(map[string]*MethodDesc), - sd: make(map[string]*StreamDesc), - mdata: sd.Metadata, + info := &serviceInfo{ + serviceImpl: ss, + methods: make(map[string]*MethodDesc), + streams: make(map[string]*StreamDesc), + mdata: sd.Metadata, } for i := range sd.Methods { d := &sd.Methods[i] - srv.md[d.MethodName] = d + info.methods[d.MethodName] = d } for i := range sd.Streams { d := &sd.Streams[i] - srv.sd[d.StreamName] = d + info.streams[d.StreamName] = d } - s.m[sd.ServiceName] = srv + s.services[sd.ServiceName] = info } // MethodInfo contains the information of an RPC including its method name and type. @@ -599,16 +714,16 @@ type ServiceInfo struct { // Service names include the package names, in the form of .. func (s *Server) GetServiceInfo() map[string]ServiceInfo { ret := make(map[string]ServiceInfo) - for n, srv := range s.m { - methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) - for m := range srv.md { + for n, srv := range s.services { + methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams)) + for m := range srv.methods { methods = append(methods, MethodInfo{ Name: m, IsClientStream: false, IsServerStream: false, }) } - for m, d := range srv.sd { + for m, d := range srv.streams { methods = append(methods, MethodInfo{ Name: m, IsClientStream: d.ClientStreams, @@ -628,16 +743,9 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo { // the server being stopped. 
var ErrServerStopped = errors.New("grpc: the server has been stopped") -func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if s.opts.creds == nil { - return rawConn, nil, nil - } - return s.opts.creds.ServerHandshake(rawConn) -} - type listenSocket struct { net.Listener - channelzID int64 + channelzID *channelz.Identifier } func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { @@ -649,9 +757,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") return err } @@ -684,11 +791,6 @@ func (s *Server) Serve(lis net.Listener) error { ls := &listenSocket{Listener: lis} s.lis[ls] = true - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - defer func() { s.mu.Lock() if s.lis != nil && s.lis[ls] { @@ -698,8 +800,16 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { @@ -743,7 +853,7 @@ func (s *Server) Serve(lis net.Listener) error { // s.conns before this conn can be added. s.serveWG.Add(1) go func() { - s.handleRawConn(rawConn) + s.handleRawConn(lis.Addr().String(), rawConn) s.serveWG.Done() }() } @@ -751,51 +861,47 @@ func (s *Server) Serve(lis net.Listener) error { // handleRawConn forks a goroutine to handle a just-accepted connection that // has not had any I/O performed on it yet. -func (s *Server) handleRawConn(rawConn net.Conn) { +func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { if s.quit.HasFired() { rawConn.Close() return } rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) - conn, authInfo, err := s.useTransportAuthenticator(rawConn) - if err != nil { - // ErrConnDispatched means that the connection was dispatched away from - // gRPC; those connections should be left open. - if err != credentials.ErrConnDispatched { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - rawConn.Close() - } - rawConn.SetDeadline(time.Time{}) - return - } // Finish handshaking (HTTP2) - st := s.newHTTP2Transport(conn, authInfo) + st := s.newHTTP2Transport(rawConn) + rawConn.SetDeadline(time.Time{}) if st == nil { return } - rawConn.SetDeadline(time.Time{}) - if !s.addConn(st) { + if !s.addConn(lisAddr, st) { return } go func() { s.serveStreams(st) - s.removeConn(st) + s.removeConn(lisAddr, st) }() } +func (s *Server) drainServerTransports(addr string) { + s.mu.Lock() + conns := s.conns[addr] + for st := range conns { + st.Drain("") + } + s.mu.Unlock() +} + // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). 
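Serve's accept loop (partially visible above) retries transient Accept errors rather than giving up. A self-contained sketch of the pattern, assuming the 5ms-to-1s doubling schedule gRPC has historically used:

package main

import (
	"log"
	"net"
	"time"
)

// acceptLoop retries temporary Accept errors with capped exponential
// backoff, resetting the delay after each successful Accept.
func acceptLoop(lis net.Listener, handle func(net.Conn)) error {
	var tempDelay time.Duration // how long to sleep on accept failure
	for {
		conn, err := lis.Accept()
		if err != nil {
			if ne, ok := err.(interface{ Temporary() bool }); ok && ne.Temporary() {
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay *= 2
				}
				if max := 1 * time.Second; tempDelay > max {
					tempDelay = max
				}
				log.Printf("Accept error: %v; retrying in %v", err, tempDelay)
				time.Sleep(tempDelay)
				continue
			}
			return err
		}
		tempDelay = 0
		go handle(conn)
	}
}

func main() {}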
-func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { +func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { config := &transport.ServerConfig{ MaxStreams: s.opts.maxConcurrentStreams, - AuthInfo: authInfo, + ConnectionTimeout: s.opts.connectionTimeout, + Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, + StatsHandlers: s.opts.statsHandlers, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, @@ -806,13 +912,20 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, } - st, err := transport.NewServerTransport("http2", c, config) + st, err := transport.NewServerTransport(c, config) if err != nil { s.mu.Lock() s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() - c.Close() - channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. + if err != io.EOF { + channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + } + c.Close() + } return nil } @@ -820,29 +933,29 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr } func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() + defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup - var roundRobinCounter uint32 + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) + + streamQuota.acquire() + f := func() { + defer streamQuota.release() + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + } + if s.opts.numServerWorkers > 0 { - data := &serverWorkerData{st: st, wg: &wg, stream: stream} select { - case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + case s.serverWorkerChannel <- f: + return default: // If all stream workers are busy, fallback to the default code path. - go func() { - s.handleStream(st, stream, s.traceInfo(st, stream)) - wg.Done() - }() } - } else { - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() } + go f() }, func(ctx context.Context, method string) context.Context { if !EnableTracing { return ctx @@ -867,28 +980,33 @@ var _ http.Handler = (*Server)(nil) // To share one port (such as 443 for https) between gRPC and an // existing http.Handler, use a root http.Handler such as: // -// if r.ProtoMajor == 2 && strings.HasPrefix( -// r.Header.Get("Content-Type"), "application/grpc") { -// grpcServer.ServeHTTP(w, r) -// } else { -// yourMux.ServeHTTP(w, r) -// } +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } // // Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally // separate from grpc-go's HTTP/2 server. Performance and features may vary // between the two paths. 
ServeHTTP does not support some gRPC features -// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL -// and subject to change. +// available through grpc-go's HTTP/2 server. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. return } - if !s.addConn(st) { + if !s.addConn(listenerAddressForServeHTTP, st) { return } - defer s.removeConn(st) + defer s.removeConn(listenerAddressForServeHTTP, st) s.serveStreams(st) } @@ -916,27 +1034,40 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea return trInfo } -func (s *Server) addConn(st transport.ServerTransport) bool { +func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close() + st.Close(errors.New("Server.addConn called when server has already been stopped")) return false } if s.drain { // Transport added after we drained our existing conns: drain it // immediately. - st.Drain() + st.Drain("") } - s.conns[st] = true + + if s.conns[addr] == nil { + // Create a map entry if this is the first connection on this listener. + s.conns[addr] = make(map[transport.ServerTransport]bool) + } + s.conns[addr][st] = true return true } -func (s *Server) removeConn(st transport.ServerTransport) { +func (s *Server) removeConn(addr string, st transport.ServerTransport) { s.mu.Lock() defer s.mu.Unlock() - if s.conns != nil { - delete(s.conns, st) + + conns := s.conns[addr] + if conns != nil { + delete(conns, st) + if len(conns) == 0 { + // If the last connection for this address is being removed, also + // remove the map entry corresponding to the address. This is used + // in GracefulStop() when waiting for all connections to be closed. + delete(s.conns, addr) + } s.cv.Broadcast() } } @@ -980,8 +1111,10 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(payload), s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) - if err == nil && s.opts.statsHandler != nil { - s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + if err == nil { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } } return err } @@ -1001,36 +1134,40 @@ func chainUnaryServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) - } + chainedInt = chainUnaryInterceptors(interceptors) } s.opts.unaryInt = chainedInt } -// getChainUnaryHandler recursively generate the chained UnaryHandler +func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { if curr == len(interceptors)-1 { return finalHandler } - return func(ctx context.Context, req interface{}) (interface{}, error) { return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { - sh := s.opts.statsHandler - if sh != nil || trInfo != nil || channelz.IsOn() { +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } var statsBegin *stats.Begin - if sh != nil { + for _, sh := range shs { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: false, + IsServerStream: false, } sh.HandleRPC(stream.Context(), statsBegin) } @@ -1056,7 +1193,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.Finish() } - if sh != nil { + for _, sh := range shs { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1076,9 +1213,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } }() } - - binlog := binarylog.GetMethodLogger(stream.Method()) - if binlog != nil { + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ @@ -1098,7 +1242,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
if peer, ok := peer.FromContext(ctx); ok { logEntry.PeerAddr = peer.Addr } - binlog.Log(logEntry) + for _, binlog := range binlogs { + binlog.Log(ctx, logEntry) + } } // comp and cp are used for compression. decomp and dc are used for @@ -1108,6 +1254,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. var comp, decomp encoding.Compressor var cp Compressor var dc Decompressor + var sendCompressorName string // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. @@ -1128,25 +1275,29 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { cp = s.opts.cp - stream.SetSendCompress(cp.Type()) + sendCompressorName = cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. comp = encoding.GetCompressor(rc) if comp != nil { - stream.SetSendCompress(rc) + sendCompressorName = comp.Name() + } + } + + if sendCompressorName != "" { + if err := stream.SetSendCompress(sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) } } var payInfo *payloadInfo - if sh != nil || binlog != nil { + if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { - if st, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, st); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) - } + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } @@ -1157,19 +1308,23 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - if sh != nil { + for _, sh := range shs { sh.HandleRPC(stream.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: v, - WireLength: payInfo.wireLength, - Data: d, - Length: len(d), + RecvTime: time.Now(), + Payload: v, + Length: len(d), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Data: d, }) } - if binlog != nil { - binlog.Log(&binarylog.ClientMessage{ + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: d, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), cm) + } } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) @@ -1177,13 +1332,14 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. return nil } ctx := NewContextWithServerTransportStream(stream.Context(), stream) - reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt) + reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - // Convert appErr if it is not a grpc status error. 
- appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() } if trInfo != nil { trInfo.tr.LazyLog(stringer(appStatus.Message()), true) @@ -1192,18 +1348,24 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if e := t.WriteStatus(stream, appStatus); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } - if binlog != nil { + if len(binlogs) != 0 { if h, _ := stream.Header(); h.Len() > 0 { // Only log serverHeader if there was header. Otherwise it can // be trailer only. - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), sh) + } } - binlog.Log(&binarylog.ServerTrailer{ + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), st) + } } return appErr } @@ -1212,6 +1374,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } opts := &transport.Options{Last: true} + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if stream.SendCompress() != sendCompressorName { + comp = encoding.GetCompressor(stream.SendCompress()) + } if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). @@ -1229,26 +1396,34 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerTrailer{ + } + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), st) + } } return err } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerMessage{ + } + sm := &binarylog.ServerMessage{ Message: reply, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), sm) + } } if channelz.IsOn() { t.IncrMsgSent() @@ -1259,14 +1434,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // TODO: Should we be logging if writing status failed here, like above? // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? - err = t.WriteStatus(stream, statusOK) - if binlog != nil { - binlog.Log(&binarylog.ServerTrailer{ + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), st) + } } - return err + return t.WriteStatus(stream, statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. 
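The chaining refactor above composes interceptors recursively: getChainUnaryHandler hands interceptor n a handler that invokes interceptor n+1, bottoming out at the registered method handler. A minimal, editor-supplied sketch of what this looks like from the public API, assuming two illustrative interceptors wired up with grpc.ChainUnaryInterceptor (a sketch, not part of this patch):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// logging runs first: it wraps everything that follows it in the chain.
func logging(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("-> %s", info.FullMethod)
	resp, err := handler(ctx, req) // handler is the rest of the chain, not the method itself
	log.Printf("<- %s err=%v", info.FullMethod, err)
	return resp, err
}

// recovery runs second, immediately around the registered method handler.
func recovery(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = status.Errorf(codes.Internal, "panic: %v", r)
		}
	}()
	return handler(ctx, req)
}

func main() {
	// Equivalent to the chained interceptor built by chainUnaryInterceptors above.
	s := grpc.NewServer(grpc.ChainUnaryInterceptor(logging, recovery))
	defer s.Stop()
	// Register services and call s.Serve(lis) as usual.
}

Each interceptor sees the RPC's FullMethod and decides whether to call the rest of the chain; returning without invoking handler short-circuits the RPC.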
@@ -1284,37 +1461,43 @@ func chainStreamServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) - } + chainedInt = chainStreamInterceptors(interceptors) } s.opts.streamInt = chainedInt } -// getChainStreamHandler recursively generate the chained StreamHandler +func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { if curr == len(interceptors)-1 { return finalHandler } - - return func(srv interface{}, ss ServerStream) error { - return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + return func(srv interface{}, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } - sh := s.opts.statsHandler + shs := s.opts.statsHandlers var statsBegin *stats.Begin - if sh != nil { + if len(shs) != 0 { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + for _, sh := range shs { + sh.HandleRPC(stream.Context(), statsBegin) } - sh.HandleRPC(stream.Context(), statsBegin) } ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ @@ -1326,10 +1509,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: sh, + statsHandler: shs, } - if sh != nil || trInfo != nil || channelz.IsOn() { + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { // See comment in processUnaryRPC on defers. 
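// Editor's note (hedged sketch, not part of this patch): statsHandler is now
// a slice because a server may register several stats handlers, each of which
// receives every lifecycle event recorded below. Hypothetical registration:
//
//	s := grpc.NewServer(
//		grpc.StatsHandler(promHandler),  // promHandler and traceHandler are
//		grpc.StatsHandler(traceHandler), // placeholder stats.Handler values
//	)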
defer func() { if trInfo != nil { @@ -1343,7 +1526,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } - if sh != nil { + if len(shs) != 0 { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1351,7 +1534,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), end) + } } if channelz.IsOn() { @@ -1364,8 +1549,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp }() } - ss.binlog = binarylog.GetMethodLogger(stream.Method()) - if ss.binlog != nil { + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1384,7 +1576,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if peer, ok := peer.FromContext(ss.Context()); ok { logEntry.PeerAddr = peer.Addr } - ss.binlog.Log(logEntry) + for _, binlog := range ss.binlogs { + binlog.Log(stream.Context(), logEntry) + } } // If dc is set and matches the stream's compression, use it. Otherwise, try @@ -1406,22 +1600,30 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { ss.cp = s.opts.cp - stream.SetSendCompress(s.opts.cp.Type()) + ss.sendCompressorName = s.opts.cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. ss.comp = encoding.GetCompressor(rc) if ss.comp != nil { - stream.SetSendCompress(rc) + ss.sendCompressorName = rc } } + if ss.sendCompressorName != "" { + if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + } + } + + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) } var appErr error var server interface{} - if srv != nil { - server = srv.server + if info != nil { + server = info.serviceImpl } if s.opts.streamInt == nil { appErr = sd.Handler(server, ss) @@ -1436,7 +1638,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
+ appStatus = status.FromContextError(appErr) appErr = appStatus.Err() } if trInfo != nil { @@ -1445,13 +1649,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.SetError() ss.mu.Unlock() } - t.WriteStatus(ss.s, appStatus) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(stream.Context(), st) + } } + t.WriteStatus(ss.s, appStatus) // TODO: Should we log an error from WriteStatus here and below? return appErr } @@ -1460,14 +1667,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.LazyLog(stringer("OK"), false) ss.mu.Unlock() } - err = t.WriteStatus(ss.s, statusOK) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(stream.Context(), st) + } } - return err + return t.WriteStatus(ss.s, statusOK) } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { @@ -1482,7 +1691,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() @@ -1497,13 +1706,13 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] - srv, knownService := s.m[service] + srv, knownService := s.services[service] if knownService { - if md, ok := srv.md[method]; ok { + if md, ok := srv.methods[method]; ok { s.processUnaryRPC(t, stream, srv, md, trInfo) return } - if sd, ok := srv.sd[method]; ok { + if sd, ok := srv.streams[method]; ok { s.processStreamingRPC(t, stream, srv, sd, trInfo) return } @@ -1541,7 +1750,10 @@ type streamKey struct{} // NewContextWithServerTransportStream creates a new context from ctx and // attaches stream to it. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { return context.WithValue(ctx, streamKey{}, stream) } @@ -1553,7 +1765,10 @@ func NewContextWithServerTransportStream(ctx context.Context, stream ServerTrans // // See also NewContextWithServerTransportStream. // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. type ServerTransportStream interface { Method() string SetHeader(md metadata.MD) error @@ -1565,7 +1780,10 @@ type ServerTransportStream interface { // ctx. Returns nil if the given context has no stream associated with it // (which implies it is not an RPC invocation context). // -// This API is EXPERIMENTAL. +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
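//
// Editor's sketch (illustrative, not part of this patch): code that only has
// the handler's context, such as a unary interceptor, can reach the stream to
// attach response metadata; requestID is a hypothetical placeholder.
//
//	if sts := grpc.ServerTransportStreamFromContext(ctx); sts != nil {
//		_ = sts.SetHeader(metadata.Pairs("x-request-id", requestID))
//	}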
func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { s, _ := ctx.Value(streamKey{}).(ServerTransportStream) return s @@ -1584,16 +1802,12 @@ func (s *Server) Stop() { s.done.Fire() }() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() listeners := s.lis s.lis = nil - st := s.conns + conns := s.conns s.conns = nil // interrupt GracefulStop if Stop and GracefulStop are called concurrently. s.cv.Broadcast() @@ -1602,8 +1816,10 @@ func (s *Server) Stop() { for lis := range listeners { lis.Close() } - for c := range st { - c.Close() + for _, cs := range conns { + for st := range cs { + st.Close(errors.New("Server.Stop called")) + } } if s.opts.numServerWorkers > 0 { s.stopServerWorkers() @@ -1624,11 +1840,7 @@ func (s *Server) GracefulStop() { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() if s.conns == nil { s.mu.Unlock() @@ -1640,8 +1852,10 @@ func (s *Server) GracefulStop() { } s.lis = nil if !s.drain { - for st := range s.conns { - st.Drain() + for _, conns := range s.conns { + for st := range conns { + st.Drain("graceful_stop") + } } s.drain = true } @@ -1679,12 +1893,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1696,8 +1924,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. 
However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1709,8 +1943,66 @@ func SendHeader(ctx context.Context, md metadata.MD) error { return nil } +// SetSendCompressor sets a compressor for outbound messages from the server. +// It must not be called after any event that causes headers to be sent +// (see ServerStream.SetHeader for the complete list). Provided compressor is +// used when below conditions are met: +// +// - compressor is registered via encoding.RegisterCompressor +// - compressor name must exist in the client advertised compressor names +// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to +// get client supported compressor names. +// +// The context provided must be the context passed to the server's handler. +// It must be noted that compressor name encoding.Identity disables the +// outbound compression. +// By default, server messages will be sent using the same compressor with +// which request messages were sent. +// +// It is not safe to call SetSendCompressor concurrently with SendHeader and +// SendMsg. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func SetSendCompressor(ctx context.Context, name string) error { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return fmt.Errorf("failed to fetch the stream from the given context") + } + + if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil { + return fmt.Errorf("unable to set send compressor: %w", err) + } + + return stream.SetSendCompress(name) +} + +// ClientSupportedCompressors returns compressor names advertised by the client +// via grpc-accept-encoding header. +// +// The context provided must be the context passed to the server's handler. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func ClientSupportedCompressors(ctx context.Context) ([]string, error) { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) + } + + return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil +} + // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1739,3 +2031,51 @@ type channelzServer struct { func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { return c.s.channelzMetric() } + +// validateSendCompressor returns an error when given compressor name cannot be +// handled by the server or the client based on the advertised compressors. 
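//
// Editor's sketch of the handler-side flow this validation supports (not part
// of this patch): a handler opts into gzip only if the client advertised it.
// Assumes "google.golang.org/grpc/encoding/gzip" is imported for its
// registration side effect; server and pb.Msg are hypothetical placeholders.
//
//	func (s *server) Echo(ctx context.Context, in *pb.Msg) (*pb.Msg, error) {
//		if err := grpc.SetSendCompressor(ctx, gzip.Name); err != nil {
//			// Client did not advertise gzip, or headers were already sent;
//			// fall through and reply uncompressed.
//		}
//		return in, nil
//	}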
+func validateSendCompressor(name, clientCompressors string) error { + if name == encoding.Identity { + return nil + } + + if !grpcutil.IsCompressorNameRegistered(name) { + return fmt.Errorf("compressor not registered %q", name) + } + + for _, c := range strings.Split(clientCompressors, ",") { + if c == name { + return nil // found match + } + } + return fmt.Errorf("client does not support compressor %q", name) +} + +// atomicSemaphore implements a blocking, counting semaphore. acquire should be +// called synchronously; release may be called asynchronously. +type atomicSemaphore struct { + n int64 + wait chan struct{} +} + +func (q *atomicSemaphore) acquire() { + if atomic.AddInt64(&q.n, -1) < 0 { + // We ran out of quota. Block until a release happens. + <-q.wait + } +} + +func (q *atomicSemaphore) release() { + // N.B. the "<= 0" check below should allow for this to work with multiple + // concurrent calls to acquire, but also note that with synchronous calls to + // acquire, as our system does, n will never be less than -1. There are + // fairness issues (queuing) to consider if this was to be generalized. + if atomic.AddInt64(&q.n, 1) <= 0 { + // An acquire was waiting on us. Unblock it. + q.wait <- struct{}{} + } +} + +func newHandlerQuota(n uint32) *atomicSemaphore { + return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)} +} diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/service_config.go b/prometheus-to-sd/vendor/google.golang.org/grpc/service_config.go index 5e434ca7f..0df11fc09 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/service_config.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/service_config.go @@ -23,8 +23,6 @@ import ( "errors" "fmt" "reflect" - "strconv" - "strings" "time" "google.golang.org/grpc/codes" @@ -41,29 +39,7 @@ const maxInt = int(^uint(0) >> 1) // Deprecated: Users should not use this struct. Service config should be received // through name resolver, as specified here // https://github.com/grpc/grpc/blob/master/doc/service_config.md -type MethodConfig struct { - // WaitForReady indicates whether RPCs sent to this method should wait until - // the connection is ready by default (!failfast). The value specified via the - // gRPC client API will override the value set here. - WaitForReady *bool - // Timeout is the default timeout for RPCs sent to this method. The actual - // deadline used will be the minimum of the value specified here and the value - // set by the application via the gRPC client API. If either one is not set, - // then the other will be used. If neither is set, then the RPC has no deadline. - Timeout *time.Duration - // MaxReqSize is the maximum allowed payload size for an individual request in a - // stream (client->server) in bytes. The size which is measured is the serialized - // payload after per-message compression (but before stream compression) in bytes. - // The actual value used is the minimum of the value specified here and the value set - // by the application via the gRPC client API. If either one is not set, then the other - // will be used. If neither is set, then the built-in default is used. - MaxReqSize *int - // MaxRespSize is the maximum allowed payload size for an individual response in a - // stream (server->client) in bytes. - MaxRespSize *int - // RetryPolicy configures retry options for the method. 
- retryPolicy *retryPolicy -} +type MethodConfig = internalserviceconfig.MethodConfig type lbConfig struct { name string @@ -79,10 +55,9 @@ type lbConfig struct { type ServiceConfig struct { serviceconfig.Config - // LB is the load balancer the service providers recommends. The balancer - // specified via grpc.WithBalancerName will override this. This is deprecated; - // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig - // will be used. + // LB is the load balancer the service providers recommends. This is + // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, + // lbConfig will be used. LB *string // lbConfig is the service config's load balancing configuration. If @@ -127,38 +102,10 @@ type healthCheckConfig struct { ServiceName string } -// retryPolicy defines the go-native version of the retry policy defined by the -// service config here: -// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config -type retryPolicy struct { - // MaxAttempts is the maximum number of attempts, including the original RPC. - // - // This field is required and must be two or greater. - maxAttempts int - - // Exponential backoff parameters. The initial retry attempt will occur at - // random(0, initialBackoff). In general, the nth attempt will occur at - // random(0, - // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). - // - // These fields are required and must be greater than zero. - initialBackoff time.Duration - maxBackoff time.Duration - backoffMultiplier float64 - - // The set of status codes which may be retried. - // - // Status codes are specified as strings, e.g., "UNAVAILABLE". - // - // This field is required and must be non-empty. - // Note: a set is used to store this for easy lookup. - retryableStatusCodes map[codes.Code]bool -} - type jsonRetryPolicy struct { MaxAttempts int - InitialBackoff string - MaxBackoff string + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration BackoffMultiplier float64 RetryableStatusCodes []codes.Code } @@ -180,50 +127,6 @@ type retryThrottlingPolicy struct { TokenRatio float64 } -func parseDuration(s *string) (*time.Duration, error) { - if s == nil { - return nil, nil - } - if !strings.HasSuffix(*s, "s") { - return nil, fmt.Errorf("malformed duration %q", *s) - } - ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) - if len(ss) > 2 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. 
- hasDigits := false - var d time.Duration - if len(ss[0]) > 0 { - i, err := strconv.ParseInt(ss[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - d = time.Duration(i) * time.Second - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - f, err := strconv.ParseInt(ss[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - for i := 9; i > len(ss[1]); i-- { - f *= 10 - } - d += time.Duration(f) - hasDigits = true - } - if !hasDigits { - return nil, fmt.Errorf("malformed duration %q", *s) - } - - return &d, nil -} - type jsonName struct { Service string Method string @@ -252,7 +155,7 @@ func (j jsonName) generatePath() (string, error) { type jsonMC struct { Name *[]jsonName WaitForReady *bool - Timeout *string + Timeout *internalserviceconfig.Duration MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 RetryPolicy *jsonRetryPolicy @@ -268,7 +171,7 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfigForTesting = parseServiceConfig + internal.ParseServiceConfig = parseServiceConfig } func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { @@ -277,7 +180,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ @@ -303,18 +206,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { if m.Name == nil { continue } - d, err := parseDuration(m.Timeout) - if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return &serviceconfig.ParseResult{Err: err} - } mc := MethodConfig{ WaitForReady: m.WaitForReady, - Timeout: d, + Timeout: (*time.Duration)(m.Timeout), } - if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -334,13 +232,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { for i, n := range *m.Name { path, err := n.generatePath() if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } if _, ok := paths[path]; ok { err = errDuplicatedName - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } paths[path] = struct{}{} @@ -359,41 +257,33 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { +func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return 
nil, nil } - ib, err := parseDuration(&jrp.InitialBackoff) - if err != nil { - return nil, err - } - mb, err := parseDuration(&jrp.MaxBackoff) - if err != nil { - return nil, err - } if jrp.MaxAttempts <= 1 || - *ib <= 0 || - *mb <= 0 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff <= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) return nil, nil } - rp := &retryPolicy{ - maxAttempts: jrp.MaxAttempts, - initialBackoff: *ib, - maxBackoff: *mb, - backoffMultiplier: jrp.BackoffMultiplier, - retryableStatusCodes: make(map[codes.Code]bool), + rp := &internalserviceconfig.RetryPolicy{ + MaxAttempts: jrp.MaxAttempts, + InitialBackoff: time.Duration(jrp.InitialBackoff), + MaxBackoff: time.Duration(jrp.MaxBackoff), + BackoffMultiplier: jrp.BackoffMultiplier, + RetryableStatusCodes: make(map[codes.Code]bool), } - if rp.maxAttempts > 5 { + if rp.MaxAttempts > 5 { // TODO(retry): Make the max maxAttempts configurable. - rp.maxAttempts = 5 + rp.MaxAttempts = 5 } for _, code := range jrp.RetryableStatusCodes { - rp.retryableStatusCodes[code] = true + rp.RetryableStatusCodes[code] = true } return rp, nil } @@ -431,6 +321,9 @@ func init() { // // If any of them is NOT *ServiceConfig, return false. func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } aa, ok := a.(*ServiceConfig) if !ok { return false diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/prometheus-to-sd/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go index 187c30442..35e7a20a0 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -19,7 +19,10 @@ // Package serviceconfig defines types and methods for operating on gRPC // service configs. // -// This package is EXPERIMENTAL. +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. package serviceconfig // Config represents an opaque data structure holding a service config. diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/stats/stats.go b/prometheus-to-sd/vendor/google.golang.org/grpc/stats/stats.go index 63e476ee7..7a552a9b7 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/stats/stats.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/stats/stats.go @@ -36,15 +36,22 @@ type RPCStats interface { IsClient() bool } -// Begin contains stats when an RPC begins. +// Begin contains stats when an RPC attempt begins. // FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. Client bool - // BeginTime is the time when the RPC begins. + // BeginTime is the time when the RPC attempt begins. BeginTime time.Time // FailFast indicates if this RPC is failfast. FailFast bool + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool + // IsTransparentRetryAttempt indicates whether this attempt was initiated + // due to transparently retrying a previous attempt. + IsTransparentRetryAttempt bool } // IsClient indicates if the stats information is from client side. @@ -60,10 +67,18 @@ type InPayload struct { Payload interface{} // Data is the serialized message payload. 
Data []byte - // Length is the length of uncompressed data. + + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int + // RecvTime is the time when the payload is received. RecvTime time.Time } @@ -122,9 +137,15 @@ type OutPayload struct { Payload interface{} // Data is the serialized message payload. Data []byte - // Length is the length of uncompressed data. + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int // SentTime is the time when the payload is sent. SentTime time.Time diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/status/status.go b/prometheus-to-sd/vendor/google.golang.org/grpc/status/status.go index 01e182c30..bcf2e4d81 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/status/status.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/status/status.go @@ -29,6 +29,7 @@ package status import ( "context" + "errors" "fmt" spb "google.golang.org/genproto/googleapis/rpc/status" @@ -73,17 +74,52 @@ func FromProto(s *spb.Status) *Status { return status.FromProto(s) } -// FromError returns a Status representing err if it was produced from this -// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a -// Status is returned with codes.Unknown and the original error message. +// FromError returns a Status representation of err. +// +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type +// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped +// errors, the message returned contains the entire err.Error() text and not +// just the wrapped status. In that case, ok is true. +// +// - If err is nil, a Status is returned with codes.OK and no message, and ok +// is true. +// +// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` +// returns nil (which maps to Codes.OK), or if err wraps a type +// satisfying this, a Status is returned with codes.Unknown and err's +// Error() message, and ok is false. +// +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus(), true + type grpcstatus interface{ GRPCStatus() *Status } + if gs, ok := err.(grpcstatus); ok { + if gs.GRPCStatus() == nil { + // Error has status nil, which maps to codes.OK. 
There + // is no sensible behavior for this, so we turn it into + // an error with codes.Unknown and discard the existing + // status. + return New(codes.Unknown, err.Error()), false + } + return gs.GRPCStatus(), true + } + var gs grpcstatus + if errors.As(err, &gs) { + if gs.GRPCStatus() == nil { + // Error wraps an error that has status nil, which maps + // to codes.OK. There is no sensible behavior for this, + // so we turn it into an error with codes.Unknown and + // discard the existing status. + return New(codes.Unknown, err.Error()), false + } + p := gs.GRPCStatus().Proto() + p.Message = err.Error() + return status.FromProto(p), true } return New(codes.Unknown, err.Error()), false } @@ -95,33 +131,30 @@ func Convert(err error) *Status { return s } -// Code returns the Code of the error if it is a Status error, codes.OK if err -// is nil, or codes.Unknown otherwise. +// Code returns the Code of the error if it is a Status error or if it wraps a +// Status error. If that is not the case, it returns codes.OK if err is nil, or +// codes.Unknown otherwise. func Code(err error) codes.Code { // Don't use FromError to avoid allocation of OK status. if err == nil { return codes.OK } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus().Code() - } - return codes.Unknown + + return Convert(err).Code() } -// FromContextError converts a context error into a Status. It returns a -// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is -// non-nil and not a context error. +// FromContextError converts a context error or wrapped context error into a +// Status. It returns a Status with codes.OK if err is nil, or a Status with +// codes.Unknown if err is non-nil and not a context error. func FromContextError(err error) *Status { - switch err { - case nil: + if err == nil { return nil - case context.DeadlineExceeded: + } + if errors.Is(err, context.DeadlineExceeded) { return New(codes.DeadlineExceeded, err.Error()) - case context.Canceled: + } + if errors.Is(err, context.Canceled) { return New(codes.Canceled, err.Error()) - default: - return New(codes.Unknown, err.Error()) } + return New(codes.Unknown, err.Error()) } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/stream.go b/prometheus-to-sd/vendor/google.golang.org/grpc/stream.go index fbc3fb11c..10092685b 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/stream.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/stream.go @@ -36,6 +36,10 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -44,20 +48,28 @@ import ( ) // StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. 
Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type StreamHandler func(srv interface{}, stream ServerStream) error -// StreamDesc represents a streaming RPC service's method specification. +// StreamDesc represents a streaming RPC service's method specification. Used +// on the server when registering services and on the client when initiating +// new streams. type StreamDesc struct { - StreamName string - Handler StreamHandler - - // At least one of these is true. - ServerStreams bool - ClientStreams bool + // StreamName and Handler are only used when registering handlers on a + // server. + StreamName string // the name of the method excluding the service + Handler StreamHandler // the handler called for the method + + // ServerStreams and ClientStreams are used for registering handlers on a + // server as well as defining RPC behavior when passed to NewClientStream + // and ClientConn.NewStream. At least one must be true. + ServerStreams bool // indicates the server can perform streaming sends + ClientStreams bool // indicates the client can perform streaming sends } // Stream defines the common interface a client or server stream has to satisfy. @@ -111,6 +123,9 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On @@ -129,17 +144,22 @@ type ClientStream interface { // To ensure resources are not leaked due to the stream returned, one of the following // actions must be performed: // -// 1. Call Close on the ClientConn. -// 2. Cancel the context provided. -// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated -// client-streaming RPC, for instance, might use the helper function -// CloseAndRecv (note that CloseSend does not Recv, therefore is not -// guaranteed to release all resources). -// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. // // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. 
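//
// Editor's sketch of option 3 above (illustrative, not part of this patch):
// calling Recv until a non-nil error is returned guarantees cleanup. client
// and pb are hypothetical placeholders for a generated stub.
//
//	stream, err := client.ListItems(ctx, &pb.ListRequest{})
//	if err != nil {
//		return err
//	}
//	for {
//		item, err := stream.Recv()
//		if err == io.EOF {
//			return nil // stream finished cleanly; resources are released
//		}
//		if err != nil {
//			return err // any non-nil error also releases resources
//		}
//		_ = item // consume the message
//	}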
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + if err := cc.idlenessMgr.onCallBegin(); err != nil { + return nil, err + } + defer cc.idlenessMgr.onCallEnd() + // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -156,6 +176,20 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + // validate md + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + // validate added + for _, kvs := range added { + for i := 0; i < len(kvs); i += 2 { + if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -164,13 +198,55 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } }() } - c := defaultCallInfo() // Provide an opportunity for the first RPC to see the first service config // provided by the resolver. if err := cc.waitForResolvedAddrs(ctx); err != nil { return nil, err } - mc := cc.GetMethodConfig(method) + + var mc serviceconfig.MethodConfig + var onCommit func() + var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) + } + + rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} + rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. 
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } + return nil, toRPCErr(err) + } + + if rpcConfig != nil { + if rpcConfig.Context != nil { + ctx = rpcConfig.Context + } + mc = rpcConfig.MethodConfig + onCommit = rpcConfig.OnCommitted + if rpcConfig.Interceptor != nil { + rpcInfo.Context = nil + ns := newStream + newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns) + if err != nil { + return nil, toRPCErr(err) + } + return cs, nil + } + } + } + + return newStream(ctx, func() {}) +} + +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { + c := defaultCallInfo() if mc.WaitForReady != nil { c.failFast = !*mc.WaitForReady } @@ -207,6 +283,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth Host: cc.authority, Method: method, ContentSubtype: c.contentSubtype, + DoneFunc: doneFunc, } // Set our outgoing compression according to the UseCompressor CallOption, if @@ -230,33 +307,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth if c.creds != nil { callHdr.Creds = c.creds } - var trInfo *traceInfo - if EnableTracing { - trInfo = &traceInfo{ - tr: trace.New("grpc.Sent."+methodFamily(method), method), - firstLine: firstLine{ - client: true, - }, - } - if deadline, ok := ctx.Deadline(); ok { - trInfo.firstLine.deadline = time.Until(deadline) - } - trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = trace.NewContext(ctx, trInfo.tr) - } - ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) - sh := cc.dopts.copts.StatsHandler - var beginTime time.Time - if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) - beginTime = time.Now() - begin := &stats.Begin{ - Client: true, - BeginTime: beginTime, - FailFast: c.failFast, - } - sh.HandleRPC(ctx, begin) - } cs := &clientStream{ callHdr: callHdr, @@ -270,28 +320,41 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth cp: cp, comp: comp, cancel: cancel, - beginTime: beginTime, firstAttempt: true, + onCommit: onCommit, } if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) } - cs.binlog = binarylog.GetMethodLogger(method) - - // Only this initial attempt has stats/tracing. - // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. - if err := cs.newAttemptLocked(sh, trInfo); err != nil { - cs.finish(err) - return nil, err + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } } - op := func(a *csAttempt) error { return a.newStream() } + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. 
+ op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) return nil, err } - if cs.binlog != nil { + if len(cs.binlogs) != 0 { md, _ := metadata.FromOutgoingContext(ctx) logEntry := &binarylog.ClientHeader{ OnClientSide: true, @@ -305,7 +368,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth logEntry.Timeout = 0 } } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } if desc != unaryStreamDesc { @@ -326,63 +391,123 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return cs, nil } -// newAttemptLocked creates a new attempt with a transport. -// If it succeeds, then it replaces clientStream's attempt with this new attempt. -func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { - newAttempt := &csAttempt{ - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, +// newAttemptLocked creates a new csAttempt without a transport or stream. +func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) } - defer func() { - if retErr != nil { - // This attempt is not set in the clientStream, so it's finish won't - // be called. Call it here for stats and trace in case they are not - // nil. - newAttempt.finish(retErr) + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + method := cs.callHdr.Method + var beginTime time.Time + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: cs.callInfo.failFast, + IsClientStream: cs.desc.ClientStreams, + IsServerStream: cs.desc.ServerStreams, + IsTransparentRetryAttempt: isTransparent, } - }() + sh.HandleRPC(ctx, begin) + } - if err := cs.ctx.Err(); err != nil { - return toRPCErr(err) + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) } - ctx := cs.ctx - if cs.cc.parsedTarget.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. 
- ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs( + ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), )) } - t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + + return &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, + }, nil +} + +func (a *csAttempt) getTransport() error { + cs := a.cs + + var err error + a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) } - newAttempt.t = t - newAttempt.done = done - cs.attempt = newAttempt return nil } func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries - s, err := a.t.NewStream(cs.ctx, cs.callHdr) + + // Merge metadata stored in PickResult, if any, with existing call metadata. + // It is safe to overwrite the csAttempt's context here, since all state + // maintained in it are local to the attempt. When the attempt has to be + // retried, a new instance of csAttempt will be created. + if a.pickResult.Metadata != nil { + // We currently do not have a function it the metadata package which + // merges given metadata with existing metadata in a context. Existing + // function `AppendToOutgoingContext()` takes a variadic argument of key + // value pairs. + // + // TODO: Make it possible to retrieve key value pairs from metadata.MD + // in a form passable to AppendToOutgoingContext(), or create a version + // of AppendToOutgoingContext() that accepts a metadata.MD. + md, _ := metadata.FromOutgoingContext(a.ctx) + md = metadata.Join(md, a.pickResult.Metadata) + a.ctx = metadata.NewOutgoingContext(a.ctx, md) + } + + s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - if _, ok := err.(transport.PerformedIOError); ok { - // Return without converting to an RPC error so retry code can - // inspect. + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. return err } - return toRPCErr(err) + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} + a.s = s + a.p = &parser{r: s} return nil } @@ -400,8 +525,7 @@ type clientStream struct { cancel context.CancelFunc // cancels all attempts - sentLast bool // sent an end stream - beginTime time.Time + sentLast bool // sent an end stream methodConfig *MethodConfig @@ -409,7 +533,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlogs []binarylog.MethodLogger // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -432,7 +556,8 @@ type clientStream struct { // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? + committed bool // active attempt committed for retry? 
+ onCommit func() buffer []func(a *csAttempt) error // operations to replay on retry bufferSize int // current size of buffer } @@ -440,11 +565,12 @@ type clientStream struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - done func(balancer.DoneInfo) + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + pickResult balancer.PickResult finished bool dc Decompressor @@ -457,10 +583,19 @@ type csAttempt struct { // and cleared when the finish method is called. trInfo *traceInfo - statsHandler stats.Handler + statsHandlers []stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool } func (cs *clientStream) commitAttemptLocked() { + if !cs.committed && cs.onCommit != nil { + cs.onCommit() + } cs.committed = true cs.buffer = nil } @@ -472,85 +607,76 @@ func (cs *clientStream) commitAttempt() { } // shouldRetry returns nil if the RPC should be retried; otherwise it returns -// the error that should be returned by the operation. -func (cs *clientStream) shouldRetry(err error) error { - unprocessed := false - if cs.attempt.s == nil { - pioErr, ok := err.(transport.PerformedIOError) - if ok { - // Unwrap error. - err = toRPCErr(pioErr.Err) - } else { - unprocessed = true - } - if !ok && !cs.callInfo.failFast { - // In the event of a non-IO operation error from NewStream, we - // never attempted to write anything to the wire, so we can retry - // indefinitely for non-fail-fast RPCs. - return nil - } +// the error that should be returned by the operation. If the RPC should be +// retried, the bool indicates whether it is being retried transparently. +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs + + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. + return false, err } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. - return err + if a.s == nil && a.allowTransparentRetry { + return true, nil } // Wait for the trailers. - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - unprocessed = cs.attempt.s.Unprocessed() + unprocessed := false + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. - return nil + return true, nil } if cs.cc.dopts.disableRetry { - return err + return false, err } pushback := 0 hasPushback := false - if cs.attempt.s != nil { - if !cs.attempt.s.TrailersOnly() { - return err + if a.s != nil { + if !a.s.TrailersOnly() { + return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. - sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. 
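// (A malformed or negative pushback value is the server telling the client to stop retrying: it ends the retry sequence and, like any other failure, debits the retry-throttling budget. A well-formed trailer such as "grpc-retry-pushback-ms: 250" instead delays the next attempt by 250ms, as computed below.)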
- return err + return false, err } hasPushback = true } else if len(sps) > 1 { channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } } var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() + if a.s != nil { + code = a.s.Status().Code() } else { - code = status.Convert(err).Code() + code = status.Code(err) } - rp := cs.methodConfig.retryPolicy - if rp == nil || !rp.retryableStatusCodes[code] { - return err + rp := cs.methodConfig.RetryPolicy + if rp == nil || !rp.RetryableStatusCodes[code] { + return false, err } // Note: the ordering here is important; we count this as a failure // only if the code matched a retryable code. if cs.retryThrottler.throttle() { - return err + return false, err } - if cs.numRetries+1 >= rp.maxAttempts { - return err + if cs.numRetries+1 >= rp.MaxAttempts { + return false, err } var dur time.Duration @@ -558,9 +684,9 @@ func (cs *clientStream) shouldRetry(err error) error { dur = time.Millisecond * time.Duration(pushback) cs.numRetriesSincePushback = 0 } else { - fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.initialBackoff) * fact - if max := float64(rp.maxBackoff); cur > max { + fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.InitialBackoff) * fact + if max := float64(rp.MaxBackoff); cur > max { cur = max } dur = time.Duration(grpcrand.Int63n(int64(cur))) @@ -573,26 +699,32 @@ func (cs *clientStream) shouldRetry(err error) error { select { case <-t.C: cs.numRetries++ - return nil + return false, nil case <-cs.ctx.Done(): t.Stop() - return status.FromContextError(cs.ctx.Err()).Err() + return false, status.FromContextError(cs.ctx.Err()).Err() } } // Returns nil if a retry was performed and succeeded; error otherwise. -func (cs *clientStream) retryLocked(lastErr error) error { +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { for { - cs.attempt.finish(lastErr) - if err := cs.shouldRetry(lastErr); err != nil { + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) + if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(nil, nil); err != nil { + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. return err } - if lastErr = cs.replayBufferLocked(); lastErr == nil { + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil } } @@ -602,7 +734,10 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. 
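// (Handing out the context commits the RPC: once a caller can observe an attempt's context, that attempt can no longer be swapped out by a retry.)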
- return cs.attempt.s.Context() + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { @@ -610,7 +745,23 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) for { if cs.committed { cs.mu.Unlock() - return op(cs.attempt) + // toRPCErr is used in case the error from the attempt comes from + // NewClientStream, which intentionally doesn't return a status + // error to allow for further inspection; all other errors should + // already be status errors. + return toRPCErr(op(cs.attempt)) + } + if len(cs.buffer) == 0 { + // For the first op, which controls creation of the stream and + // assigns cs.attempt, we need to create a new attempt inline + // before executing the first op. On subsequent ops, the attempt + // is created immediately before replaying the ops. + var err error + if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.mu.Unlock() + cs.finish(err) + return err + } } a := cs.attempt cs.mu.Unlock() @@ -628,7 +779,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) cs.mu.Unlock() return err } - if err := cs.retryLocked(err); err != nil { + if err := cs.retryLocked(a, err); err != nil { cs.mu.Unlock() return err } @@ -637,17 +788,25 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD + noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() + if err == transport.ErrNoHeaders { + noHeader = true + return nil + } return toRPCErr(err) }, cs.commitAttemptLocked) + if err != nil { cs.finish(err) return nil, err } - if cs.binlog != nil && !cs.serverHeaderBinlogged { - // Only log if binary log is on and header has not been logged. + + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + // Only log if binary log is on and header has not been logged, and + // there are actually headers to log. logEntry := &binarylog.ServerHeader{ OnClientSide: true, Header: m, @@ -656,10 +815,12 @@ func (cs *clientStream) Header() (metadata.MD, error) { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } - return m, err + return m, nil } func (cs *clientStream) Trailer() metadata.MD { @@ -677,10 +838,9 @@ func (cs *clientStream) Trailer() metadata.MD { return cs.attempt.s.Trailer() } -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { for _, f := range cs.buffer { - if err := f(a); err != nil { + if err := f(attempt); err != nil { return err } } @@ -728,47 +888,48 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) } - msgBytes := data // Store the pointer before setting to nil. For binary logging. op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts.
- m, data = nil, nil - return err + return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ClientMessage{ + if len(cs.binlogs) != 0 && err == nil { + cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: msgBytes, - }) + Message: data, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, cm) + } } - return + return err } func (cs *clientStream) RecvMsg(m interface{}) error { - if cs.binlog != nil && !cs.serverHeaderBinlogged { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() } var recvInfo *payloadInfo - if cs.binlog != nil { + if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) }, cs.commitAttemptLocked) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ServerMessage{ + if len(cs.binlogs) != 0 && err == nil { + sm := &binarylog.ServerMessage{ OnClientSide: true, Message: recvInfo.uncompressedBytes, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, sm) + } } if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - if cs.binlog != nil { + if len(cs.binlogs) != 0 { // finish will not log Trailer. Log Trailer here. logEntry := &binarylog.ServerTrailer{ OnClientSide: true, @@ -781,7 +942,9 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } } return err @@ -802,10 +965,13 @@ func (cs *clientStream) CloseSend() error { return nil } cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) - if cs.binlog != nil { - cs.binlog.Log(&binarylog.ClientHalfClose{ + if len(cs.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, chc) + } } // We never returned an error here for reasons. return nil @@ -822,6 +988,9 @@ func (cs *clientStream) finish(err error) { return } cs.finished = true + for _, onFinish := range cs.callInfo.onFinish { + onFinish(err) + } cs.commitAttemptLocked() if cs.attempt != nil { cs.attempt.finish(err) @@ -838,10 +1007,13 @@ func (cs *clientStream) finish(err error) { // // Only one of cancel or trailer needs to be logged. In the cases where // users don't call RecvMsg, users must have already canceled the RPC. 
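// (toRPCErr maps context.Canceled to a status with codes.Canceled, so the check below fires whether the user canceled the RPC directly or its context was canceled for them.)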
- if cs.binlog != nil && status.Code(err) == codes.Canceled { - cs.binlog.Log(&binarylog.Cancel{ + if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { + c := &binarylog.Cancel{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } } if err == nil { cs.retryThrottler.successfulRPC() @@ -874,8 +1046,8 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { } return io.EOF } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -885,7 +1057,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { cs := a.cs - if a.statsHandler != nil && payInfo == nil { + if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} } @@ -913,6 +1085,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } return io.EOF // indicates successful end of stream. } + return toRPCErr(err) } if a.trInfo != nil { @@ -922,15 +1095,16 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } a.mu.Unlock() } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Length: len(payInfo.uncompressedBytes), }) } if channelz.IsOn() { @@ -969,12 +1143,12 @@ func (a *csAttempt) finish(err error) { tr = a.s.Trailer() } - if a.done != nil { + if a.pickResult.Done != nil { br := false if a.s != nil { br = a.s.BytesReceived() } - a.done(balancer.DoneInfo{ + a.pickResult.Done(balancer.DoneInfo{ Err: err, Trailer: tr, BytesSent: a.s != nil, @@ -982,15 +1156,15 @@ func (a *csAttempt) finish(err error) { ServerLoad: balancerload.Parse(tr), }) } - if a.statsHandler != nil { + for _, sh := range a.statsHandlers { end := &stats.End{ Client: true, - BeginTime: a.cs.beginTime, + BeginTime: a.beginTime, EndTime: time.Now(), Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.cs.ctx, end) + sh.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -1099,14 +1273,19 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin as.p = &parser{r: s} ac.incrCallsStarted() if desc != unaryStreamDesc { - // Listen on cc and stream contexts to cleanup when the user closes the - // ClientConn or cancels the stream context. In all other cases, an error - // should already be injected into the recv buffer by the transport, which - // the client will eventually receive, and then we will cancel the stream's - // context in clientStream.finish. + // Listen on stream context to cleanup when the stream context is + // canceled. Also listen for the addrConn's context in case the + // addrConn is closed or reconnects to a different address. 
In all + other cases, an error should already be injected into the recv + buffer by the transport, which the client will eventually receive, + and then we will cancel the stream's context in + addrConnStream.finish. go func() { + ac.mu.Lock() + acCtx := ac.ctx + ac.mu.Unlock() select { - case <-ac.ctx.Done(): + case <-acCtx.Done(): as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) case <-ctx.Done(): as.finish(toRPCErr(ctx.Err())) @@ -1295,8 +1474,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When called multiple times, all the provided metadata will be merged. @@ -1328,6 +1509,9 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On @@ -1353,13 +1537,15 @@ type serverStream struct { comp encoding.Compressor decomp encoding.Compressor + sendCompressorName string + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo - statsHandler stats.Handler + statsHandler []stats.Handler - binlog *binarylog.MethodLogger + binlogs []binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). @@ -1379,17 +1565,29 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) - if ss.binlog != nil && !ss.serverHeaderBinlogged { + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } } return err } @@ -1398,6 +1596,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } @@ -1430,6 +1631,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } }() + // Server handler could have set a new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing the outbound message.
+ if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { + ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.sendCompressorName = sendCompressorsName + } + // load hdr, payload, data hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) if err != nil { @@ -1443,20 +1651,28 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } - if ss.binlog != nil { + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } } - ss.binlog.Log(&binarylog.ServerMessage{ + sm := &binarylog.ServerMessage{ Message: data, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sm) + } } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } } return nil } @@ -1490,13 +1706,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } }() var payInfo *payloadInfo - if ss.statsHandler != nil || ss.binlog != nil { + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} } if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { if err == io.EOF { - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientHalfClose{}) + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, chc) + } } return err } @@ -1505,20 +1724,26 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } return toRPCErr(err) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength, - Length: len(payInfo.uncompressedBytes), - }) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + Length: len(payInfo.uncompressedBytes), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + }) + } } - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientMessage{ + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: payInfo.uncompressedBytes, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, cm) + } } return nil } diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/tap/tap.go b/prometheus-to-sd/vendor/google.golang.org/grpc/tap/tap.go index 584360f68..bfa5dfa40 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/tap/tap.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/tap/tap.go @@ -17,7 +17,12 @@ */ // Package tap defines the function handles which are executed on the transport -// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL. +// layer of gRPC-Go and related information. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
package tap import ( @@ -32,16 +37,16 @@ type Info struct { // TODO: More to be added. } -// ServerInHandle defines the function which runs before a new stream is created -// on the server side. If it returns a non-nil error, the stream will not be -// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. -// The client will receive an RPC error "code = Unavailable, desc = stream -// terminated by RST_STREAM with error code: REFUSED_STREAM". +// ServerInHandle defines the function which runs before a new stream is +// created on the server side. If it returns a non-nil error, the stream will +// not be created and an error will be returned to the client. If the error +// returned is a status error, that status code and message will be used, +// otherwise PermissionDenied will be the code and err.Error() will be the +// message. // // It's intended to be used in situations where you don't want to waste the -// resources to accept the new stream (e.g. rate-limiting). And the content of -// the error will be ignored and won't be sent back to the client. For other -// general usages, please use interceptors. +// resources to accept the new stream (e.g. rate-limiting). For other general +// usages, please use interceptors. // // Note that it is executed in the per-connection I/O goroutine(s) instead of // per-RPC goroutine. Therefore, users should NOT have any diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/version.go b/prometheus-to-sd/vendor/google.golang.org/grpc/version.go index fb878dda9..3cc754062 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/version.go +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.31.0" +const Version = "1.56.3" diff --git a/prometheus-to-sd/vendor/google.golang.org/grpc/vet.sh b/prometheus-to-sd/vendor/google.golang.org/grpc/vet.sh index 8b7dff19a..a8e4732b3 100644 --- a/prometheus-to-sd/vendor/google.golang.org/grpc/vet.sh +++ b/prometheus-to-sd/vendor/google.golang.org/grpc/vet.sh @@ -28,34 +28,23 @@ cleanup() { } trap cleanup EXIT -PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" +PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" +go version if [[ "$1" = "-install" ]]; then - # Check for module support - if go help mod >& /dev/null; then - # Install the pinned versions as defined in module tools. - pushd ./test/tools - go install \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - popd - else - # Ye olde `go get` incantation. - # Note: this gets the latest version of all tools (vs. the pinned versions - # with Go modules). - go get -u \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - fi + # Install the pinned versions as defined in module tools. + pushd ./test/tools + go install \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell + popd if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${TRAVIS}" = "true" ]]; then - PROTOBUF_VERSION=3.3.0 + if [[ "${GITHUB_ACTIONS}" = "true" ]]; then + PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. 
PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/travis + pushd /home/runner/go wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} unzip ${PROTOC_FILENAME} bin/protoc --version @@ -69,8 +58,20 @@ elif [[ "$#" -ne 0 ]]; then die "Unknown argument(s): $*" fi +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + make proto && git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +if [[ -n "${VET_ONLY_PROTO}" ]]; then + exit 0 +fi + # - Ensure all source files contain a copyright message. -not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' +# (Done in two parts because Darwin "git grep" has broken support for compound +# exclusion matches.) +(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. not grep 'func Test[^(]' *_test.go @@ -83,44 +84,41 @@ not git grep -l 'x/net/context' -- "*.go" # thread safety. git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +# - Do not call grpclog directly. Use grpclog.Component instead. +git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' + # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" # - Ensure all xds proto imports are renamed to *pb or *grpc. -git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' | not grep -v 'pb "\|grpc "' - -# - Check imports that are illegal in appengine (until Go 1.11). -# TODO: Remove when we drop Go 1.10 support -go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go - -# - gofmt, goimports, golint (with exceptions for generated code), go vet. -gofmt -s -d -l . 2>&1 | fail_on_output -goimports -l . 2>&1 | not grep -vE "(_mock|\.pb)\.go" -golint ./... 2>&1 | not grep -vE "(_mock|\.pb)\.go:" -go vet -all ./... +git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' misspell -error . -# - Check that generated proto files are up to date. -if [[ -z "${VET_SKIP_PROTO}" ]]; then - PATH="/home/travis/bin:${PATH}" make proto && \ - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - -# - Check that our modules are tidy. -if go help mod >& /dev/null; then - find . -name 'go.mod' | xargs -IXXX bash -c 'cd $(dirname XXX); go mod tidy' +# - gofmt, goimports, golint (with exceptions for generated code), go vet, +# go mod tidy. +# Perform these checks on each module inside gRPC. +for MOD_FILE in $(find . -name 'go.mod'); do + MOD_DIR=$(dirname ${MOD_FILE}) + pushd ${MOD_DIR} + go vet -all ./... | fail_on_output + gofmt -s -d -l . 2>&1 | fail_on_output + goimports -l . 2>&1 | not grep -vE "\.pb\.go" + golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" + + go mod tidy -compat=1.17 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) -fi + popd +done # - Collection of static analysis checks # # TODO(dfawley): don't use deprecated functions in examples or first-party # plugins. +# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. 
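+# staticcheck output is captured to a scratch file so the allow-list greps
+# below can filter out the known, intentional uses of deprecated identifiers.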
SC_OUT="$(mktemp)" -staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. @@ -130,8 +128,11 @@ not grep -Fv '.CredsBundle .NewAddress .NewServiceConfig .Type is deprecated: use Attributes +BuildVersion is deprecated balancer.ErrTransientFailure balancer.Picker +extDesc.Filename is deprecated +github.com/golang/protobuf/jsonpb is deprecated grpc.CallCustomCodec grpc.Code grpc.Compressor @@ -144,7 +145,6 @@ grpc.NewGZIPDecompressor grpc.RPCCompressor grpc.RPCDecompressor grpc.ServiceConfig -grpc.WithBalancerName grpc.WithCompressor grpc.WithDecompressor grpc.WithDialer @@ -153,8 +153,26 @@ grpc.WithServiceConfig grpc.WithTimeout http.CloseNotifier info.SecurityVersion +proto is deprecated +proto.InternalMessageInfo is deprecated +proto.EnumName is deprecated +proto.ErrInternalBadWireType is deprecated +proto.FileDescriptor is deprecated +proto.Marshaler is deprecated +proto.MessageType is deprecated +proto.RegisterEnum is deprecated +proto.RegisterFile is deprecated +proto.RegisterType is deprecated +proto.RegisterExtension is deprecated +proto.RegisteredExtension is deprecated +proto.RegisteredExtensions is deprecated +proto.RegisterMapType is deprecated +proto.Unmarshaler is deprecated resolver.Backend -resolver.GRPCLB' "${SC_OUT}" +resolver.GRPCLB +Target is deprecated: Use the Target field in the BuildOptions instead. +xxx_messageInfo_ +' "${SC_OUT}" # - special golint on package comments. lint_package_comment_per_package() { diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/decode.go new file mode 100644 index 000000000..5f28148d8 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -0,0 +1,665 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + "math" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given proto.Message. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable JSON format parser. +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // If AllowPartial is set, input for messages that will result in missing + // required fields will not return an error. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. 
+ // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } +} + +// Unmarshal reads the given []byte and populates the given proto.Message +// using options in the UnmarshalOptions object. +// It will clear the message first before setting the fields. +// If it returns an error, the given message may be partially set. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + dec := decoder{json.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + + // Check for EOF. + tok, err := dec.Read() + if err != nil { + return err + } + if tok.Kind() != json.EOF { + return dec.unexpectedTokenError(tok) + } + + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *json.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok json.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals a message into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { + return unmarshal(d, m) + } + + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + return nil + case json.Name: + // Continue below. + } + + name := tok.Name() + // Unmarshaling a non-custom embedded message in Any will contain the + // JSON field "@type" which should be skipped because it is not a field + // of the embedded message, but simply an artifact of the Any format. + if skipTypeURL && name == "@type" { + d.Read() + continue + } + + // Get the FieldDescriptor. 
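+ // Extension fields appear in JSON as "[full.name]"; everything else is resolved first by its JSON (lowerCamelCase) name, then by its proto (text) name.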
+ var fd protoreflect.FieldDescriptor + if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { + // Only extension names are in [name] format. + extName := protoreflect.FullName(name[1 : len(name)-1]) + extType, err := d.opts.Resolver.FindExtensionByName(extName) + if err != nil && err != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) + } + if extType != nil { + fd = extType.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } + } else { + // The name can either be the JSON name or the proto field name. + fd = fieldDescs.ByJSONName(name) + if fd == nil { + fd = fieldDescs.ByTextName(name) + } + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + if fd == nil { + // Field is unknown. + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + + // Do not allow duplicate fields. + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "duplicate field %v", tok.RawString()) + } + seenNums.Set(num) + + // No need to set values for JSON null unless the field type is + // google.protobuf.Value or google.protobuf.NullValue. + if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) { + d.Read() + continue + } + + switch { + case fd.IsList(): + list := m.Mutable(fd).List() + if err := d.unmarshalList(list, fd); err != nil { + return err + } + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(mmap, fd); err != nil { + return err + } + default: + // If field is a oneof, check if it has already been set. + if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + // Required or optional fields. + if err := d.unmarshalSingular(m, fd); err != nil { + return err + } + } + } +} + +func isKnownValue(fd protoreflect.FieldDescriptor) bool { + md := fd.Message() + return md != nil && md.FullName() == genid.Value_message_fullname +} + +func isNullValue(fd protoreflect.FieldDescriptor) bool { + ed := fd.Enum() + return ed != nil && ed.FullName() == genid.NullValue_enum_fullname +} + +// unmarshalSingular unmarshals to the non-repeated field specified +// by the given FieldDescriptor. +func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error { + var val protoreflect.Value + var err error + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), false) + default: + val, err = d.unmarshalScalar(fd) + } + + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by +// the given FieldDescriptor. 
+func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + const b32 int = 32 + const b64 int = 64 + + tok, err := d.Read() + if err != nil { + return protoreflect.Value{}, err + } + + kind := fd.Kind() + switch kind { + case protoreflect.BoolKind: + if tok.Kind() == json.Bool { + return protoreflect.ValueOfBool(tok.Bool()), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if v, ok := unmarshalInt(tok, b32); ok { + return v, nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if v, ok := unmarshalInt(tok, b64); ok { + return v, nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if v, ok := unmarshalUint(tok, b32); ok { + return v, nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if v, ok := unmarshalUint(tok, b64); ok { + return v, nil + } + + case protoreflect.FloatKind: + if v, ok := unmarshalFloat(tok, b32); ok { + return v, nil + } + + case protoreflect.DoubleKind: + if v, ok := unmarshalFloat(tok, b64); ok { + return v, nil + } + + case protoreflect.StringKind: + if tok.Kind() == json.String { + return protoreflect.ValueOfString(tok.ParsedString()), nil + } + + case protoreflect.BytesKind: + if v, ok := unmarshalBytes(tok); ok { + return v, nil + } + + case protoreflect.EnumKind: + if v, ok := unmarshalEnum(tok, fd); ok { + return v, nil + } + + default: + panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) + } + + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) +} + +func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getInt(tok, bitSize) + + case json.String: + // Decode number from string. + s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getInt(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Int(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfInt32(int32(n)), true + } + return protoreflect.ValueOfInt64(n), true +} + +func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getUint(tok, bitSize) + + case json.String: + // Decode number from string. 
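+ // (Only exact numeric strings are accepted: if TrimSpace changes the length, the token had surrounding whitespace and is rejected rather than silently normalized.)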
+ s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getUint(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Uint(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfUint32(uint32(n)), true + } + return protoreflect.ValueOfUint64(n), true +} + +func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getFloat(tok, bitSize) + + case json.String: + s := tok.ParsedString() + switch s { + case "NaN": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.NaN())), true + } + return protoreflect.ValueOfFloat64(math.NaN()), true + case "Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(+1)), true + case "-Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(-1)), true + } + + // Decode number from string. + if len(s) != len(strings.TrimSpace(s)) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getFloat(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Float(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(n)), true + } + return protoreflect.ValueOfFloat64(n), true +} + +func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { + if tok.Kind() != json.String { + return protoreflect.Value{}, false + } + + s := tok.ParsedString() + enc := base64.StdEncoding + if strings.ContainsAny(s, "-_") { + enc = base64.URLEncoding + } + if len(s)%4 != 0 { + enc = enc.WithPadding(base64.NoPadding) + } + b, err := enc.DecodeString(s) + if err != nil { + return protoreflect.Value{}, false + } + return protoreflect.ValueOfBytes(b), true +} + +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.String: + // Lookup EnumNumber based on name. + s := tok.ParsedString() + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), true + } + + case json.Number: + if n, ok := tok.Int(32); ok { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true + } + + case json.Null: + // This is only valid for google.protobuf.NullValue. 
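+ // (google.protobuf.NullValue is a single-valued enum whose only member, NULL_VALUE, has number 0, hence ValueOfEnum(0) below.)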
+ if isNullValue(fd) { + return protoreflect.ValueOfEnum(0), true + } + } + + return protoreflect.Value{}, false +} + +func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ArrayOpen { + return d.unexpectedTokenError(tok) + } + + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val := list.NewElement() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return err + } + list.Append(val) + } + default: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(val) + } + } + + return nil +} + +func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside the for loop + // below. + var unmarshalMapValue func() (protoreflect.Value, error) + switch fd.MapValue().Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { + val := mmap.NewValue() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return protoreflect.Value{}, err + } + return val, nil + } + default: + unmarshalMapValue = func() (protoreflect.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + break Loop + case json.Name: + // Continue. + } + + // Unmarshal field name. + pkey, err := d.unmarshalMapKey(tok, fd.MapKey()) + if err != nil { + return err + } + + // Check for duplicate field name. + if mmap.Has(pkey) { + return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString()) + } + + // Read and unmarshal field value. + pval, err := unmarshalMapValue() + if err != nil { + return err + } + + mmap.Set(pkey, pval) + } + + return nil +} + +// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. +// A map key type is any integral or string type. 
+func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) { + const b32 = 32 + const b64 = 64 + const base10 = 10 + + name := tok.Name() + kind := fd.Kind() + switch kind { + case protoreflect.StringKind: + return protoreflect.ValueOfString(name).MapKey(), nil + + case protoreflect.BoolKind: + switch name { + case "true": + return protoreflect.ValueOfBool(true).MapKey(), nil + case "false": + return protoreflect.ValueOfBool(false).MapKey(), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if n, err := strconv.ParseInt(name, base10, b32); err == nil { + return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if n, err := strconv.ParseInt(name, base10, b64); err == nil { + return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if n, err := strconv.ParseUint(name, base10, b32); err == nil { + return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if n, err := strconv.ParseUint(name, base10, b64); err == nil { + return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil + } + + default: + panic(fmt.Sprintf("invalid kind for map key: %v", kind)) + } + + return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) +} diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/doc.go new file mode 100644 index 000000000..21d5d2cb1 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protojson marshals and unmarshals protocol buffer messages as JSON +// format. It follows the guide at +// https://protobuf.dev/programming-guides/proto3#json. +// +// This package produces a different output than the standard "encoding/json" +// package, which does not operate correctly on protocol buffer messages. +package protojson diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/encode.go new file mode 100644 index 000000000..d09d22e13 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -0,0 +1,343 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. 
+// This function is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given proto.Message in JSON format using default options. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable JSON format marshaler. +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. + Indent string + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // UseProtoNames uses proto field name instead of lowerCamelCase name in JSON + // field names. + UseProtoNames bool + + // UseEnumNumbers emits enum values as numbers. + UseEnumNumbers bool + + // EmitUnpopulated specifies whether to emit unpopulated fields. It does not + // emit unpopulated oneof fields or unpopulated extension fields. + // The JSON value emitted for unpopulated fields are as follows: + // ╔═══════╤════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════╣ + // ║ false │ proto3 boolean fields ║ + // ║ 0 │ proto3 numeric fields ║ + // ║ "" │ proto3 string/bytes fields ║ + // ║ null │ proto2 scalar fields ║ + // ║ null │ message fields ║ + // ║ [] │ list fields ║ + // ║ {} │ map fields ║ + // ╚═══════╧════════════════════════════╝ + EmitUnpopulated bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.ExtensionTypeResolver + protoregistry.MessageTypeResolver + } +} + +// Format formats the message as a string. +// This method is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func (o MarshalOptions) Format(m proto.Message) string { + if m == nil || !m.ProtoReflect().IsValid() { + return "" // invalid syntax, but okay since this is for debugging + } + o.AllowPartial = true + b, _ := o.Marshal(m) + return string(b) +} + +// Marshal marshals the given proto.Message in the JSON format using options in +// MarshalOptions. Do not depend on the output being stable. It may change over +// time across different versions of the program. +func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { + return o.marshal(m) +} + +// marshal is a centralized function that all marshal operations go through. 
+// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. +func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { + if o.Multiline && o.Indent == "" { + o.Indent = defaultIndent + } + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + internalEnc, err := json.NewEncoder(o.Indent) + if err != nil { + return nil, err + } + + // Treat nil message interface as an empty message, + // in which case the output is an empty JSON object. + if m == nil { + return []byte("{}"), nil + } + + enc := encoder{internalEnc, o} + if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil { + return nil, err + } + if o.AllowPartial { + return enc.Bytes(), nil + } + return enc.Bytes(), proto.CheckInitialized(m) +} + +type encoder struct { + *json.Encoder + opts MarshalOptions +} + +// typeFieldDesc is a synthetic field descriptor used for the "@type" field. +var typeFieldDesc = func() protoreflect.FieldDescriptor { + var fd filedesc.Field + fd.L0.FullName = "@type" + fd.L0.Index = -1 + fd.L1.Cardinality = protoreflect.Optional + fd.L1.Kind = protoreflect.StringKind + return &fd +}() + +// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method +// to additionally iterate over a synthetic field for the type URL. +type typeURLFieldRanger struct { + order.FieldRanger + typeURL string +} + +func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) { + return + } + m.FieldRanger.Range(f) +} + +// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range +// method to additionally iterate over unpopulated fields. +type unpopulatedFieldRanger struct{ protoreflect.Message } + +func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if m.Has(fd) || fd.ContainingOneof() != nil { + continue // ignore populated fields and fields within a oneof + } + + v := m.Get(fd) + isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() + isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil + if isProto2Scalar || isSingularMessage { + v = protoreflect.Value{} // use invalid value to emit null + } + if !f(fd, v) { + return + } + } + m.Message.Range(f) +} + +// marshalMessage marshals the fields in the given protoreflect.Message. +// If the typeURL is non-empty, then a synthetic "@type" field is injected +// containing the URL as the value.
+func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error {
+	if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) {
+		return errors.New("no support for proto1 MessageSets")
+	}
+
+	if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil {
+		return marshal(e, m)
+	}
+
+	e.StartObject()
+	defer e.EndObject()
+
+	var fields order.FieldRanger = m
+	if e.opts.EmitUnpopulated {
+		fields = unpopulatedFieldRanger{m}
+	}
+	if typeURL != "" {
+		fields = typeURLFieldRanger{fields, typeURL}
+	}
+
+	var err error
+	order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+		name := fd.JSONName()
+		if e.opts.UseProtoNames {
+			name = fd.TextName()
+		}
+
+		if err = e.WriteName(name); err != nil {
+			return false
+		}
+		if err = e.marshalValue(v, fd); err != nil {
+			return false
+		}
+		return true
+	})
+	return err
+}
+
+// marshalValue marshals the given protoreflect.Value.
+func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+	switch {
+	case fd.IsList():
+		return e.marshalList(val.List(), fd)
+	case fd.IsMap():
+		return e.marshalMap(val.Map(), fd)
+	default:
+		return e.marshalSingular(val, fd)
+	}
+}
+
+// marshalSingular marshals the given non-repeated field value. This includes
+// all scalar types, enums, messages, and groups.
+func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+	if !val.IsValid() {
+		e.WriteNull()
+		return nil
+	}
+
+	switch kind := fd.Kind(); kind {
+	case protoreflect.BoolKind:
+		e.WriteBool(val.Bool())
+
+	case protoreflect.StringKind:
+		if e.WriteString(val.String()) != nil {
+			return errors.InvalidUTF8(string(fd.FullName()))
+		}
+
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		e.WriteInt(val.Int())
+
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		e.WriteUint(val.Uint())
+
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind,
+		protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind:
+		// 64-bit integers are written out as JSON strings.
+		e.WriteString(val.String())
+
+	case protoreflect.FloatKind:
+		// Encoder.WriteFloat handles the special numbers NaN and infinities.
+		e.WriteFloat(val.Float(), 32)
+
+	case protoreflect.DoubleKind:
+		// Encoder.WriteFloat handles the special numbers NaN and infinities.
+		e.WriteFloat(val.Float(), 64)
+
+	case protoreflect.BytesKind:
+		e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes()))
+
+	case protoreflect.EnumKind:
+		if fd.Enum().FullName() == genid.NullValue_enum_fullname {
+			e.WriteNull()
+		} else {
+			desc := fd.Enum().Values().ByNumber(val.Enum())
+			if e.opts.UseEnumNumbers || desc == nil {
+				e.WriteInt(int64(val.Enum()))
+			} else {
+				e.WriteString(string(desc.Name()))
+			}
+		}
+
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		if err := e.marshalMessage(val.Message(), ""); err != nil {
+			return err
+		}
+
+	default:
+		panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind))
+	}
+	return nil
+}
+
+// marshalList marshals the given protoreflect.List.
+func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error {
+	e.StartArray()
+	defer e.EndArray()
+
+	for i := 0; i < list.Len(); i++ {
+		item := list.Get(i)
+		if err := e.marshalSingular(item, fd); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// marshalMap marshals the given protoreflect.Map.
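+// Entries are emitted in the deterministic order imposed by
+// order.GenericKeyOrder, not in the map's unspecified reflection order.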
+func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
+	e.StartObject()
+	defer e.EndObject()
+
+	var err error
+	order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool {
+		if err = e.WriteName(k.String()); err != nil {
+			return false
+		}
+		if err = e.marshalSingular(v, fd.MapValue()); err != nil {
+			return false
+		}
+		return true
+	})
+	return err
+}
diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
new file mode 100644
index 000000000..6c37d4174
--- /dev/null
+++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
@@ -0,0 +1,895 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protojson
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+
+	"google.golang.org/protobuf/internal/encoding/json"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/internal/genid"
+	"google.golang.org/protobuf/internal/strs"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type marshalFunc func(encoder, protoreflect.Message) error
+
+// wellKnownTypeMarshaler returns a marshal function if the message type
+// has specialized serialization behavior. It returns nil otherwise.
+func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc {
+	if name.Parent() == genid.GoogleProtobuf_package {
+		switch name.Name() {
+		case genid.Any_message_name:
+			return encoder.marshalAny
+		case genid.Timestamp_message_name:
+			return encoder.marshalTimestamp
+		case genid.Duration_message_name:
+			return encoder.marshalDuration
+		case genid.BoolValue_message_name,
+			genid.Int32Value_message_name,
+			genid.Int64Value_message_name,
+			genid.UInt32Value_message_name,
+			genid.UInt64Value_message_name,
+			genid.FloatValue_message_name,
+			genid.DoubleValue_message_name,
+			genid.StringValue_message_name,
+			genid.BytesValue_message_name:
+			return encoder.marshalWrapperType
+		case genid.Struct_message_name:
+			return encoder.marshalStruct
+		case genid.ListValue_message_name:
+			return encoder.marshalListValue
+		case genid.Value_message_name:
+			return encoder.marshalKnownValue
+		case genid.FieldMask_message_name:
+			return encoder.marshalFieldMask
+		case genid.Empty_message_name:
+			return encoder.marshalEmpty
+		}
+	}
+	return nil
+}
+
+type unmarshalFunc func(decoder, protoreflect.Message) error
+
+// wellKnownTypeUnmarshaler returns an unmarshal function if the message type
+// has specialized serialization behavior. It returns nil otherwise.
+func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc {
+	if name.Parent() == genid.GoogleProtobuf_package {
+		switch name.Name() {
+		case genid.Any_message_name:
+			return decoder.unmarshalAny
+		case genid.Timestamp_message_name:
+			return decoder.unmarshalTimestamp
+		case genid.Duration_message_name:
+			return decoder.unmarshalDuration
+		case genid.BoolValue_message_name,
+			genid.Int32Value_message_name,
+			genid.Int64Value_message_name,
+			genid.UInt32Value_message_name,
+			genid.UInt64Value_message_name,
+			genid.FloatValue_message_name,
+			genid.DoubleValue_message_name,
+			genid.StringValue_message_name,
+			genid.BytesValue_message_name:
+			return decoder.unmarshalWrapperType
+		case genid.Struct_message_name:
+			return decoder.unmarshalStruct
+		case genid.ListValue_message_name:
+			return decoder.unmarshalListValue
+		case genid.Value_message_name:
+			return decoder.unmarshalKnownValue
+		case genid.FieldMask_message_name:
+			return decoder.unmarshalFieldMask
+		case genid.Empty_message_name:
+			return decoder.unmarshalEmpty
+		}
+	}
+	return nil
+}
+
+// The JSON representation of an Any message uses the regular representation of
+// the deserialized, embedded message, with an additional field `@type` which
+// contains the type URL. If the embedded message type is well-known and has a
+// custom JSON representation, that representation will be embedded by adding a
+// field `value` which holds the custom JSON in addition to the `@type` field.
+
+func (e encoder) marshalAny(m protoreflect.Message) error {
+	fds := m.Descriptor().Fields()
+	fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
+	fdValue := fds.ByNumber(genid.Any_Value_field_number)
+
+	if !m.Has(fdType) {
+		if !m.Has(fdValue) {
+			// If the message is empty, marshal out an empty JSON object.
+			e.StartObject()
+			e.EndObject()
+			return nil
+		} else {
+			// Return an error if the type_url field is not set but the value is.
+			return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name)
+		}
+	}
+
+	typeVal := m.Get(fdType)
+	valueVal := m.Get(fdValue)
+
+	// Resolve the type in order to unmarshal value field.
+	typeURL := typeVal.String()
+	emt, err := e.opts.Resolver.FindMessageByURL(typeURL)
+	if err != nil {
+		return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err)
+	}
+
+	em := emt.New()
+	err = proto.UnmarshalOptions{
+		AllowPartial: true, // never check required fields inside an Any
+		Resolver:     e.opts.Resolver,
+	}.Unmarshal(valueVal.Bytes(), em.Interface())
+	if err != nil {
+		return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err)
+	}
+
+	// If the type of the value has a custom JSON encoding, marshal out a
+	// "value" field holding the corresponding custom JSON encoding of the
+	// embedded message.
+	if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil {
+		e.StartObject()
+		defer e.EndObject()
+
+		// Marshal out @type field.
+		e.WriteName("@type")
+		if err := e.WriteString(typeURL); err != nil {
+			return err
+		}
+
+		e.WriteName("value")
+		return marshal(e, em)
+	}
+
+	// Else, marshal out the embedded message's fields in this Any object.
+	if err := e.marshalMessage(em, typeURL); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (d decoder) unmarshalAny(m protoreflect.Message) error {
+	// Peek to check for json.ObjectOpen to avoid advancing a read.
+	start, err := d.Peek()
+	if err != nil {
+		return err
+	}
+	if start.Kind() != json.ObjectOpen {
+		return d.unexpectedTokenError(start)
+	}
+
+	// Use another decoder to parse the unread bytes for the @type field. This
+	// avoids advancing a read from the current decoder because the current
+	// JSON object may contain the fields of the embedded type.
+	dec := decoder{d.Clone(), UnmarshalOptions{}}
+	tok, err := findTypeURL(dec)
+	switch err {
+	case errEmptyObject:
+		// An empty JSON object translates to an empty Any message.
+		d.Read() // Read json.ObjectOpen.
+		d.Read() // Read json.ObjectClose.
+		return nil
+
+	case errMissingType:
+		if d.opts.DiscardUnknown {
+			// Treat all fields as unknowns, similar to an empty object.
+			return d.skipJSONValue()
+		}
+		// Use start.Pos() for line position.
+		return d.newError(start.Pos(), err.Error())
+
+	default:
+		if err != nil {
+			return err
+		}
+	}
+
+	typeURL := tok.ParsedString()
+	emt, err := d.opts.Resolver.FindMessageByURL(typeURL)
+	if err != nil {
+		return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err)
+	}
+
+	// Create new message for the embedded message type and unmarshal into it.
+	em := emt.New()
+	if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil {
+		// If embedded message is a custom type,
+		// unmarshal the JSON "value" field into it.
+		if err := d.unmarshalAnyValue(unmarshal, em); err != nil {
+			return err
+		}
+	} else {
+		// Else unmarshal the current JSON object into it.
+		if err := d.unmarshalMessage(em, true); err != nil {
+			return err
+		}
+	}
+	// Serialize the embedded message and assign the resulting bytes to the
+	// proto value field.
+	b, err := proto.MarshalOptions{
+		AllowPartial:  true, // No need to check required fields inside an Any.
+		Deterministic: true,
+	}.Marshal(em.Interface())
+	if err != nil {
+		return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err)
+	}
+
+	fds := m.Descriptor().Fields()
+	fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
+	fdValue := fds.ByNumber(genid.Any_Value_field_number)
+
+	m.Set(fdType, protoreflect.ValueOfString(typeURL))
+	m.Set(fdValue, protoreflect.ValueOfBytes(b))
+	return nil
+}
+
+var errEmptyObject = fmt.Errorf(`empty object`)
+var errMissingType = fmt.Errorf(`missing "@type" field`)
+
+// findTypeURL returns the token for the "@type" field value from the given
+// JSON bytes. It is expected that the given bytes start with json.ObjectOpen.
+// It returns errEmptyObject if the JSON object is empty, or errMissingType if
+// the @type field does not exist. It returns an error if the @type field is
+// not valid or for other decoding issues.
+func findTypeURL(d decoder) (json.Token, error) {
+	var typeURL string
+	var typeTok json.Token
+	numFields := 0
+	// Skip start object.
+	d.Read()
+
+Loop:
+	for {
+		tok, err := d.Read()
+		if err != nil {
+			return json.Token{}, err
+		}
+
+		switch tok.Kind() {
+		case json.ObjectClose:
+			if typeURL == "" {
+				// Did not find @type field.
+				if numFields > 0 {
+					return json.Token{}, errMissingType
+				}
+				return json.Token{}, errEmptyObject
+			}
+			break Loop
+
+		case json.Name:
+			numFields++
+			if tok.Name() != "@type" {
+				// Skip value.
+				if err := d.skipJSONValue(); err != nil {
+					return json.Token{}, err
+				}
+				continue
+			}
+
+			// Return an error if this was already set before.
+			if typeURL != "" {
+				return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`)
+			}
+			// Read field value.
+ tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + if tok.Kind() != json.String { + return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString()) + } + typeURL = tok.ParsedString() + if typeURL == "" { + return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`) + } + typeTok = tok + } + } + + return typeTok, nil +} + +// skipJSONValue parses a JSON value (null, boolean, string, number, object and +// array) in order to advance the read to the next JSON value. It relies on +// the decoder returning an error if the types are not in valid sequence. +func (d decoder) skipJSONValue() error { + tok, err := d.Read() + if err != nil { + return err + } + // Only need to continue reading for objects and arrays. + switch tok.Kind() { + case json.ObjectOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + case json.Name: + // Skip object field value. + if err := d.skipJSONValue(); err != nil { + return err + } + } + } + + case json.ArrayOpen: + for { + tok, err := d.Peek() + if err != nil { + return err + } + switch tok.Kind() { + case json.ArrayClose: + d.Read() + return nil + default: + // Skip array item. + if err := d.skipJSONValue(); err != nil { + return err + } + } + } + } + return nil +} + +// unmarshalAnyValue unmarshals the given custom-type message from the JSON +// object's "value" field. +func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error { + // Skip ObjectOpen, and start reading the fields. + d.Read() + + var found bool // Used for detecting duplicate "value". + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + if !found { + return d.newError(tok.Pos(), `missing "value" field`) + } + return nil + + case json.Name: + switch tok.Name() { + case "@type": + // Skip the value as this was previously parsed already. + d.Read() + + case "value": + if found { + return d.newError(tok.Pos(), `duplicate "value" field`) + } + // Unmarshal the field value into the given message. + if err := unmarshal(d, m); err != nil { + return err + } + found = true + + default: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + } + } +} + +// Wrapper types are encoded as JSON primitives like string, number or boolean. + +func (e encoder) marshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val := m.Get(fd) + return e.marshalSingular(val, fd) +} + +func (d decoder) unmarshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// The JSON representation for Empty is an empty JSON object. 
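+//
+// An illustrative sketch of both encodings (assuming the wrapperspb and
+// emptypb helper packages, which this file does not use):
+//
+//	protojson.Marshal(wrapperspb.Int32(5)) // 5
+//	protojson.Marshal(wrapperspb.Int64(5)) // "5" (64-bit integers are strings)
+//	protojson.Marshal(&emptypb.Empty{})    // {}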
+
+func (e encoder) marshalEmpty(protoreflect.Message) error {
+	e.StartObject()
+	e.EndObject()
+	return nil
+}
+
+func (d decoder) unmarshalEmpty(protoreflect.Message) error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.ObjectOpen {
+		return d.unexpectedTokenError(tok)
+	}
+
+	for {
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		switch tok.Kind() {
+		case json.ObjectClose:
+			return nil
+
+		case json.Name:
+			if d.opts.DiscardUnknown {
+				if err := d.skipJSONValue(); err != nil {
+					return err
+				}
+				continue
+			}
+			return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
+
+		default:
+			return d.unexpectedTokenError(tok)
+		}
+	}
+}
+
+// The JSON representation for Struct is a JSON object that contains the encoded
+// Struct.fields map and follows the serialization rules for a map.
+
+func (e encoder) marshalStruct(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
+	return e.marshalMap(m.Get(fd).Map(), fd)
+}
+
+func (d decoder) unmarshalStruct(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
+	return d.unmarshalMap(m.Mutable(fd).Map(), fd)
+}
+
+// The JSON representation for ListValue is a JSON array that contains the
+// encoded ListValue.values repeated field and follows the serialization rules
+// for a repeated field.
+
+func (e encoder) marshalListValue(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
+	return e.marshalList(m.Get(fd).List(), fd)
+}
+
+func (d decoder) unmarshalListValue(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
+	return d.unmarshalList(m.Mutable(fd).List(), fd)
+}
+
+// The JSON representation for a Value is dependent on the oneof field that is
+// set. Each of the fields in the oneof has its own custom serialization rule.
+// A Value message needs to have one of the oneof fields set, else it is an
+// error.
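+//
+// A rough mapping, assuming the structpb helper constructors (not used by
+// this file):
+//
+//	structpb.NewNullValue()       // null
+//	structpb.NewBoolValue(true)   // true
+//	structpb.NewNumberValue(1.5)  // 1.5
+//	structpb.NewStringValue("hi") // "hi"
+//	structpb.NewStructValue(...)  // JSON object
+//	structpb.NewListValue(...)    // JSON array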
+
+func (e encoder) marshalKnownValue(m protoreflect.Message) error {
+	od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name)
+	fd := m.WhichOneof(od)
+	if fd == nil {
+		return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname)
+	}
+	if fd.Number() == genid.Value_NumberValue_field_number {
+		if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) {
+			return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v)
+		}
+	}
+	return e.marshalSingular(m.Get(fd), fd)
+}
+
+func (d decoder) unmarshalKnownValue(m protoreflect.Message) error {
+	tok, err := d.Peek()
+	if err != nil {
+		return err
+	}
+
+	var fd protoreflect.FieldDescriptor
+	var val protoreflect.Value
+	switch tok.Kind() {
+	case json.Null:
+		d.Read()
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number)
+		val = protoreflect.ValueOfEnum(0)
+
+	case json.Bool:
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number)
+		val = protoreflect.ValueOfBool(tok.Bool())
+
+	case json.Number:
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number)
+		var ok bool
+		val, ok = unmarshalFloat(tok, 64)
+		if !ok {
+			return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
+		}
+
+	case json.String:
+		// A JSON string may have been encoded from the number_value field,
+		// e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows
+		// for it to be in JSON string form. Given this custom encoding spec,
+		// however, there is no way to identify that and hence a JSON string is
+		// always assigned to the string_value field, which means that certain
+		// encodings cannot be parsed back to the same field.
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number)
+		val = protoreflect.ValueOfString(tok.ParsedString())
+
+	case json.ObjectOpen:
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number)
+		val = m.NewField(fd)
+		if err := d.unmarshalStruct(val.Message()); err != nil {
+			return err
+		}
+
+	case json.ArrayOpen:
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number)
+		val = m.NewField(fd)
+		if err := d.unmarshalListValue(val.Message()); err != nil {
+			return err
+		}
+
+	default:
+		return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
+	}
+
+	m.Set(fd, val)
+	return nil
+}
+
+// The JSON representation for a Duration is a JSON string that ends in the
+// suffix "s" (indicating seconds) and is preceded by the number of seconds,
+// with nanoseconds expressed as fractional seconds.
+//
+// Durations less than one second are represented with a 0 seconds field and a
+// positive or negative nanos field. For durations of one second or more, a
+// non-zero value for the nanos field must be of the same sign as the seconds
+// field.
+//
+// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive.
+// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive.
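+//
+// A few illustrative encodings (assuming the durationpb helper package):
+//
+//	durationpb.New(90 * time.Second)        // "90s"
+//	durationpb.New(1500 * time.Millisecond) // "1.500s"
+//	durationpb.New(-time.Nanosecond)        // "-0.000000001s"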
+ +const ( + secondsInNanos = 999999999 + maxSecondsInDuration = 315576000000 +) + +func (e encoder) marshalDuration(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs) + } + if nanos < -secondsInNanos || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos) + } + if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) { + return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname) + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + var sign string + if secs < 0 || nanos < 0 { + sign, secs, nanos = "-", -1*secs, -1*nanos + } + x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "s") + return nil +} + +func (d decoder) unmarshalDuration(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + secs, nanos, ok := parseDuration(tok.ParsedString()) + if !ok { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString()) + } + // Validate seconds. No need to validate nanos because parseDuration would + // have covered that already. + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(nanos)) + return nil +} + +// parseDuration parses the given input string for seconds and nanoseconds value +// for the Duration JSON format. The format is a decimal number with a suffix +// 's'. It can have optional plus/minus sign. There needs to be at least an +// integer or fractional part. Fractional part is limited to 9 digits only for +// nanoseconds precision, regardless of whether there are trailing zero digits. +// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s. +func parseDuration(input string) (int64, int32, bool) { + b := []byte(input) + size := len(b) + if size < 2 { + return 0, 0, false + } + if b[size-1] != 's' { + return 0, 0, false + } + b = b[:size-1] + + // Read optional plus/minus symbol. + var neg bool + switch b[0] { + case '-': + neg = true + b = b[1:] + case '+': + b = b[1:] + } + if len(b) == 0 { + return 0, 0, false + } + + // Read the integer part. + var intp []byte + switch { + case b[0] == '0': + b = b[1:] + + case '1' <= b[0] && b[0] <= '9': + intp = b[0:] + b = b[1:] + n := 1 + for len(b) > 0 && '0' <= b[0] && b[0] <= '9' { + n++ + b = b[1:] + } + intp = intp[:n] + + case b[0] == '.': + // Continue below. + + default: + return 0, 0, false + } + + hasFrac := false + var frac [9]byte + if len(b) > 0 { + if b[0] != '.' 
{
+			return 0, 0, false
+		}
+		// Read the fractional part.
+		b = b[1:]
+		n := 0
+		for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' {
+			frac[n] = b[0]
+			n++
+			b = b[1:]
+		}
+		// It is not valid if there are more bytes left.
+		if len(b) > 0 {
+			return 0, 0, false
+		}
+		// Pad fractional part with 0s.
+		for i := n; i < 9; i++ {
+			frac[i] = '0'
+		}
+		hasFrac = true
+	}
+
+	var secs int64
+	if len(intp) > 0 {
+		var err error
+		secs, err = strconv.ParseInt(string(intp), 10, 64)
+		if err != nil {
+			return 0, 0, false
+		}
+	}
+
+	var nanos int64
+	if hasFrac {
+		nanob := bytes.TrimLeft(frac[:], "0")
+		if len(nanob) > 0 {
+			var err error
+			nanos, err = strconv.ParseInt(string(nanob), 10, 32)
+			if err != nil {
+				return 0, 0, false
+			}
+		}
+	}
+
+	if neg {
+		if secs > 0 {
+			secs = -secs
+		}
+		if nanos > 0 {
+			nanos = -nanos
+		}
+	}
+	return secs, int32(nanos), true
+}
+
+// The JSON representation for a Timestamp is a JSON string in the RFC 3339
+// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where
+// {year} is always expressed using four digits while {month}, {day}, {hour},
+// {min}, and {sec} are zero-padded to two digits each. The fractional seconds,
+// which can go up to 9 digits (i.e. up to 1 nanosecond resolution), are
+// optional. The "Z" suffix indicates the timezone ("UTC"); the timezone is
+// required. Encoding should always use UTC (as indicated by "Z") and a decoder
+// should be able to accept both UTC and other timezones (as indicated by an
+// offset).
+//
+// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z
+// inclusive.
+// Timestamp.nanos must be from 0 to 999,999,999 inclusive.
+
+const (
+	maxTimestampSeconds = 253402300799
+	minTimestampSeconds = -62135596800
+)
+
+func (e encoder) marshalTimestamp(m protoreflect.Message) error {
+	fds := m.Descriptor().Fields()
+	fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
+	fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
+
+	secsVal := m.Get(fdSeconds)
+	nanosVal := m.Get(fdNanos)
+	secs := secsVal.Int()
+	nanos := nanosVal.Int()
+	if secs < minTimestampSeconds || secs > maxTimestampSeconds {
+		return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs)
+	}
+	if nanos < 0 || nanos > secondsInNanos {
+		return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos)
+	}
+	// Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3,
+	// 6 or 9 fractional digits.
+	t := time.Unix(secs, nanos).UTC()
+	x := t.Format("2006-01-02T15:04:05.000000000")
+	x = strings.TrimSuffix(x, "000")
+	x = strings.TrimSuffix(x, "000")
+	x = strings.TrimSuffix(x, ".000")
+	e.WriteString(x + "Z")
+	return nil
+}
+
+func (d decoder) unmarshalTimestamp(m protoreflect.Message) error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.String {
+		return d.unexpectedTokenError(tok)
+	}
+
+	s := tok.ParsedString()
+	t, err := time.Parse(time.RFC3339Nano, s)
+	if err != nil {
+		return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
+	}
+	// Validate seconds.
+	secs := t.Unix()
+	if secs < minTimestampSeconds || secs > maxTimestampSeconds {
+		return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString())
+	}
+	// Validate subseconds.
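+	// The mapping permits at most nine fractional digits (nanosecond
+	// resolution), so reject any longer fractional-second field even when
+	// time.Parse accepted the string.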
+	i := strings.LastIndexByte(s, '.')  // start of subsecond field
+	j := strings.LastIndexAny(s, "Z-+") // start of timezone field
+	if i >= 0 && j >= i && j-i > len(".999999999") {
+		return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
+	}
+
+	fds := m.Descriptor().Fields()
+	fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
+	fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
+
+	m.Set(fdSeconds, protoreflect.ValueOfInt64(secs))
+	m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond())))
+	return nil
+}
+
+// The JSON representation for a FieldMask is a JSON string where paths are
+// separated by a comma. Field names in each path are converted to/from
+// lower-camel naming conventions. Encoding should fail if the path name would
+// end up differently after a round-trip.
+
+func (e encoder) marshalFieldMask(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
+	list := m.Get(fd).List()
+	paths := make([]string, 0, list.Len())
+
+	for i := 0; i < list.Len(); i++ {
+		s := list.Get(i).String()
+		if !protoreflect.FullName(s).IsValid() {
+			return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s)
+		}
+		// Return an error if the conversion to camelCase is not reversible.
+		cc := strs.JSONCamelCase(s)
+		if s != strs.JSONSnakeCase(cc) {
+			return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s)
+		}
+		paths = append(paths, cc)
+	}
+
+	e.WriteString(strings.Join(paths, ","))
+	return nil
+}
+
+func (d decoder) unmarshalFieldMask(m protoreflect.Message) error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.String {
+		return d.unexpectedTokenError(tok)
+	}
+	str := strings.TrimSpace(tok.ParsedString())
+	if str == "" {
+		return nil
+	}
+	paths := strings.Split(str, ",")
+
+	fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
+	list := m.Mutable(fd).List()
+
+	for _, s0 := range paths {
+		s := strs.JSONSnakeCase(s0)
+		if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() {
+			return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0)
+		}
+		list.Append(protoreflect.ValueOfString(s))
+	}
+	return nil
+}
diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
index ce57f57eb..f4b4686cf 100644
--- a/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
+++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -3,7 +3,7 @@
 // license that can be found in the LICENSE file.
 
 // Package protowire parses and formats the raw wire encoding.
-// See https://developers.google.com/protocol-buffers/docs/encoding.
+// See https://protobuf.dev/programming-guides/encoding.
 //
 // For marshaling and unmarshaling entire protobuf messages,
 // use the "google.golang.org/protobuf/proto" package instead.
@@ -29,12 +29,8 @@ const (
 )
 
 // IsValid reports whether the field number is semantically valid.
-//
-// Note that while numbers within the reserved range are semantically invalid,
-// they are syntactically valid in the wire format.
-// Implementations may treat records with reserved field numbers as unknown.
func (n Number) IsValid() bool { - return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber + return MinValidNumber <= n && n <= MaxValidNumber } // Type represents the wire type. diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go new file mode 100644 index 000000000..d043a6ebe --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -0,0 +1,340 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "io" + "regexp" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +const unexpectedFmt = "unexpected token %s" + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// Decoder is a token-based JSON decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. + lastErr error + + // openStack is a stack containing ObjectOpen and ArrayOpen values. The + // top of stack represents the object or the array the current value is + // directly located in. + openStack []Kind + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// Peek looks ahead and returns the next token kind without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next JSON token. +// It will return an error if there is no valid token. +func (d *Decoder) Read() (Token, error) { + const scalar = Null | Bool | Number | String + + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext() + if err != nil { + return Token{}, err + } + + switch tok.kind { + case EOF: + if len(d.openStack) != 0 || + d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 { + return Token{}, ErrUnexpectedEOF + } + + case Null: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case Bool, Number: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case String: + if d.isValueNext() { + break + } + // This string token should only be for a field name. 
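+		// A name must directly follow an object opening or a comma, and it
+		// must be trailed by a ':' separator.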
+ if d.lastToken.kind&(ObjectOpen|comma) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + if len(d.in) == 0 { + return Token{}, ErrUnexpectedEOF + } + if c := d.in[0]; c != ':' { + return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c)) + } + tok.kind = Name + d.consume(1) + + case ObjectOpen, ArrayOpen: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = append(d.openStack, tok.kind) + + case ObjectClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ObjectOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case ArrayClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ArrayOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case comma: + if len(d.openStack) == 0 || + d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + } + + // Update d.lastToken only after validating token to be in the right sequence. + d.lastToken = tok + + if d.lastToken.kind == comma { + return d.Read() + } + return tok, nil +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`) + +// parseNext parses for the next JSON token. It returns a Token object for +// different types, except for Name. It does not handle whether the next token +// is in a valid sequence or not. +func (d *Decoder) parseNext() (Token, error) { + // Trim leading spaces. + d.consume(0) + + in := d.in + if len(in) == 0 { + return d.consumeToken(EOF, 0), nil + } + + switch in[0] { + case 'n': + if n := matchWithDelim("null", in); n != 0 { + return d.consumeToken(Null, n), nil + } + + case 't': + if n := matchWithDelim("true", in); n != 0 { + return d.consumeBoolToken(true, n), nil + } + + case 'f': + if n := matchWithDelim("false", in); n != 0 { + return d.consumeBoolToken(false, n), nil + } + + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + if n, ok := parseNumber(in); ok { + return d.consumeToken(Number, n), nil + } + + case '"': + s, n, err := d.parseString(in) + if err != nil { + return Token{}, err + } + return d.consumeStringToken(s, n), nil + + case '{': + return d.consumeToken(ObjectOpen, 1), nil + + case '}': + return d.consumeToken(ObjectClose, 1), nil + + case '[': + return d.consumeToken(ArrayOpen, 1), nil + + case ']': + return d.consumeToken(ArrayClose, 1), nil + + case ',': + return d.consumeToken(comma, 1), nil + } + return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in)) +} + +// newSyntaxError returns an error with line and column information useful for +// syntax errors. +func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { + e := errors.New(f, x...) + line, column := d.Position(pos) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. 
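+//
+// For example, Position(0) reports (1, 1), and for the input "{\n}" the
+// index of '}' reports (2, 1).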
+func (d *Decoder) Position(idx int) (line int, column int) {
+	b := d.orig[:idx]
+	line = bytes.Count(b, []byte("\n")) + 1
+	if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
+		b = b[i+1:]
+	}
+	column = utf8.RuneCount(b) + 1 // ignore multi-rune characters
+	return line, column
+}
+
+// currPos returns the current index position of d.in from d.orig.
+func (d *Decoder) currPos() int {
+	return len(d.orig) - len(d.in)
+}
+
+// matchWithDelim matches s with the input b and verifies that the match
+// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]").
+// As a special case, EOF is considered a delimiter. It returns the length of s
+// if there is a match, else 0.
+func matchWithDelim(s string, b []byte) int {
+	if !bytes.HasPrefix(b, []byte(s)) {
+		return 0
+	}
+
+	n := len(s)
+	if n < len(b) && isNotDelim(b[n]) {
+		return 0
+	}
+	return n
+}
+
+// isNotDelim returns true if the given byte is not a delimiter character.
+func isNotDelim(c byte) bool {
+	return (c == '-' || c == '+' || c == '.' || c == '_' ||
+		('a' <= c && c <= 'z') ||
+		('A' <= c && c <= 'Z') ||
+		('0' <= c && c <= '9'))
+}
+
+// consume consumes n bytes of input and any subsequent whitespace.
+func (d *Decoder) consume(n int) {
+	d.in = d.in[n:]
+	for len(d.in) > 0 {
+		switch d.in[0] {
+		case ' ', '\n', '\r', '\t':
+			d.in = d.in[1:]
+		default:
+			return
+		}
+	}
+}
+
+// isValueNext returns true if next type should be a JSON value: Null,
+// Number, String or Bool.
+func (d *Decoder) isValueNext() bool {
+	if len(d.openStack) == 0 {
+		return d.lastToken.kind == 0
+	}
+
+	start := d.openStack[len(d.openStack)-1]
+	switch start {
+	case ObjectOpen:
+		return d.lastToken.kind&Name != 0
+	case ArrayOpen:
+		return d.lastToken.kind&(ArrayOpen|comma) != 0
+	}
+	panic(fmt.Sprintf(
+		"unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v",
+		d.lastToken.kind, start))
+}
+
+// consumeToken constructs a Token for given Kind with raw value derived from
+// current d.in and given size, and consumes the given size-length of it.
+func (d *Decoder) consumeToken(kind Kind, size int) Token {
+	tok := Token{
+		kind: kind,
+		raw:  d.in[:size],
+		pos:  len(d.orig) - len(d.in),
+	}
+	d.consume(size)
+	return tok
+}
+
+// consumeBoolToken constructs a Token for a Bool kind with raw value derived from
+// current d.in and given size.
+func (d *Decoder) consumeBoolToken(b bool, size int) Token {
+	tok := Token{
+		kind: Bool,
+		raw:  d.in[:size],
+		pos:  len(d.orig) - len(d.in),
+		boo:  b,
+	}
+	d.consume(size)
+	return tok
+}
+
+// consumeStringToken constructs a Token for a String kind with raw value derived
+// from current d.in and given size.
+func (d *Decoder) consumeStringToken(s string, size int) Token {
+	tok := Token{
+		kind: String,
+		raw:  d.in[:size],
+		pos:  len(d.orig) - len(d.in),
+		str:  s,
+	}
+	d.consume(size)
+	return tok
+}
+
+// Clone returns a copy of the Decoder for use in reading ahead the next JSON
+// object, array or other values without affecting current Decoder.
+func (d *Decoder) Clone() *Decoder {
+	ret := *d
+	ret.openStack = append([]Kind(nil), ret.openStack...)
+	return &ret
+}
diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
new file mode 100644
index 000000000..2999d7133
--- /dev/null
+++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
@@ -0,0 +1,254 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"strconv"
+)
+
+// parseNumber reads the given []byte for a valid JSON number. If it is valid,
+// it returns the number of bytes. Parsing logic follows the definition in
+// https://tools.ietf.org/html/rfc7159#section-6, and is based off the
+// encoding/json.isValidNumber function.
+func parseNumber(input []byte) (int, bool) {
+	var n int
+
+	s := input
+	if len(s) == 0 {
+		return 0, false
+	}
+
+	// Optional -
+	if s[0] == '-' {
+		s = s[1:]
+		n++
+		if len(s) == 0 {
+			return 0, false
+		}
+	}
+
+	// Digits
+	switch {
+	case s[0] == '0':
+		s = s[1:]
+		n++
+
+	case '1' <= s[0] && s[0] <= '9':
+		s = s[1:]
+		n++
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+
+	default:
+		return 0, false
+	}
+
+	// . followed by 1 or more digits.
+	if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+		s = s[2:]
+		n += 2
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+	}
+
+	// e or E followed by an optional - or + and
+	// 1 or more digits.
+	if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+		s = s[1:]
+		n++
+		if s[0] == '+' || s[0] == '-' {
+			s = s[1:]
+			n++
+			if len(s) == 0 {
+				return 0, false
+			}
+		}
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+	}
+
+	// Check that next byte is a delimiter or it is at the end.
+	if n < len(input) && isNotDelim(input[n]) {
+		return 0, false
+	}
+
+	return n, true
+}
+
+// numberParts is the result of parsing out a valid JSON number. It contains
+// the parts of a number. The parts are used for integer conversion.
+type numberParts struct {
+	neg  bool
+	intp []byte
+	frac []byte
+	exp  []byte
+}
+
+// parseNumberParts constructs numberParts from the given []byte. The logic
+// here is similar to parseNumber above, with the difference of having to
+// construct numberParts. The slice fields in numberParts are subslices of
+// the input.
+func parseNumberParts(input []byte) (numberParts, bool) {
+	var neg bool
+	var intp []byte
+	var frac []byte
+	var exp []byte
+
+	s := input
+	if len(s) == 0 {
+		return numberParts{}, false
+	}
+
+	// Optional -
+	if s[0] == '-' {
+		neg = true
+		s = s[1:]
+		if len(s) == 0 {
+			return numberParts{}, false
+		}
+	}
+
+	// Digits
+	switch {
+	case s[0] == '0':
+		// Skip first 0 and no need to store.
+		s = s[1:]
+
+	case '1' <= s[0] && s[0] <= '9':
+		intp = s
+		n := 1
+		s = s[1:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+		intp = intp[:n]
+
+	default:
+		return numberParts{}, false
+	}
+
+	// . followed by 1 or more digits.
+	if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+		frac = s[1:]
+		n := 1
+		s = s[2:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+		frac = frac[:n]
+	}
+
+	// e or E followed by an optional - or + and
+	// 1 or more digits.
+	if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+		s = s[1:]
+		exp = s
+		n := 0
+		if s[0] == '+' || s[0] == '-' {
+			s = s[1:]
+			n++
+			if len(s) == 0 {
+				return numberParts{}, false
+			}
+		}
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+		exp = exp[:n]
+	}
+
+	return numberParts{
+		neg:  neg,
+		intp: intp,
+		frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right.
+		exp:  exp,
+	}, true
+}
+
+// normalizeToIntString returns an integer string in normal form without the
+// E-notation for given numberParts. It will return false if it is not an
+// integer or if the exponent exceeds the max/min int value.
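+//
+// For example, 1.5e3 normalizes to "1500", while 1.5 and 1e-1 both report
+// false since neither has an exact integer form.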
+func normalizeToIntString(n numberParts) (string, bool) {
+	intpSize := len(n.intp)
+	fracSize := len(n.frac)
+
+	if intpSize == 0 && fracSize == 0 {
+		return "0", true
+	}
+
+	var exp int
+	if len(n.exp) > 0 {
+		i, err := strconv.ParseInt(string(n.exp), 10, 32)
+		if err != nil {
+			return "", false
+		}
+		exp = int(i)
+	}
+
+	var num []byte
+	if exp >= 0 {
+		// For positive E, shift fraction digits into integer part and also pad
+		// with zeroes as needed.
+
+		// If there are more digits in fraction than the E value, then the
+		// number is not an integer.
+		if fracSize > exp {
+			return "", false
+		}
+
+		// Make sure resulting digits are within max value limit to avoid
+		// unnecessarily constructing a large byte slice that may simply fail
+		// later on.
+		const maxDigits = 20 // Max uint64 value has 20 decimal digits.
+		if intpSize+exp > maxDigits {
+			return "", false
+		}
+
+		// Set cap to make a copy of integer part when appended.
+		num = n.intp[:len(n.intp):len(n.intp)]
+		num = append(num, n.frac...)
+		for i := 0; i < exp-fracSize; i++ {
+			num = append(num, '0')
+		}
+	} else {
+		// For negative E, shift digits in integer part out.
+
+		// If there are fractions, then the number is not an integer.
+		if fracSize > 0 {
+			return "", false
+		}
+
+		// index is where the decimal point will be after adjusting for negative
+		// exponent.
+		index := intpSize + exp
+		if index < 0 {
+			return "", false
+		}
+
+		num = n.intp
+		// If any of the digits being shifted to the right of the decimal point
+		// is non-zero, then the number is not an integer.
+		for i := index; i < intpSize; i++ {
+			if num[i] != '0' {
+				return "", false
+			}
+		}
+		num = num[:index]
+	}
+
+	if n.neg {
+		return "-" + string(num), true
+	}
+	return string(num), true
+}
diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go
new file mode 100644
index 000000000..f7fea7d8d
--- /dev/null
+++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go
@@ -0,0 +1,91 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package json + +import ( + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +func (d *Decoder) parseString(in []byte) (string, int, error) { + in0 := in + if len(in) == 0 { + return "", 0, ErrUnexpectedEOF + } + if in[0] != '"' { + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0]) + } + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string") + case r < ' ': + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r) + case r == '"': + in = in[1:] + n := len(in0) - len(in) + return string(out), n, nil + case r == '\\': + if len(in) < 2 { + return "", 0, ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\\', '/': + in, out = in[2:], append(out, r) + case 'b': + in, out = in[2:], append(out, '\b') + case 'f': + in, out = in[2:], append(out, '\f') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'u': + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + if err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || + r == unicode.ReplacementChar || err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", 0, ErrUnexpectedEOF +} + +// indexNeedEscapeInBytes returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go new file mode 100644 index 000000000..50578d659 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go @@ -0,0 +1,192 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "strconv" +) + +// Kind represents a token kind expressible in the JSON format. +type Kind uint16 + +const ( + Invalid Kind = (1 << iota) / 2 + EOF + Null + Bool + Number + String + Name + ObjectOpen + ObjectClose + ArrayOpen + ArrayClose + + // comma is only for parsing in between tokens and + // does not need to be exported. 
+	comma
+)
+
+func (k Kind) String() string {
+	switch k {
+	case EOF:
+		return "eof"
+	case Null:
+		return "null"
+	case Bool:
+		return "bool"
+	case Number:
+		return "number"
+	case String:
+		return "string"
+	case ObjectOpen:
+		return "{"
+	case ObjectClose:
+		return "}"
+	case Name:
+		return "name"
+	case ArrayOpen:
+		return "["
+	case ArrayClose:
+		return "]"
+	case comma:
+		return ","
+	}
+	return "<invalid>"
+}
+
+// Token provides a parsed token kind and value.
+//
+// Values are provided by the different accessor methods. The accessor methods
+// Name, Bool, and ParsedString will panic if called on the wrong kind. There
+// are different accessor methods for the Number kind for converting to the
+// appropriate Go numeric type and those methods have the ok return value.
+type Token struct {
+	// Token kind.
+	kind Kind
+	// pos provides the position of the token in the original input.
+	pos int
+	// raw bytes of the serialized token.
+	// This is a subslice into the original input.
+	raw []byte
+	// boo is the parsed boolean value.
+	boo bool
+	// str is the parsed string value.
+	str string
+}
+
+// Kind returns the token kind.
+func (t Token) Kind() Kind {
+	return t.kind
+}
+
+// RawString returns the read value as a string.
+func (t Token) RawString() string {
+	return string(t.raw)
+}
+
+// Pos returns the token position from the input.
+func (t Token) Pos() int {
+	return t.pos
+}
+
+// Name returns the object name if token is Name, else it panics.
+func (t Token) Name() string {
+	if t.kind == Name {
+		return t.str
+	}
+	panic(fmt.Sprintf("Token is not a Name: %v", t.RawString()))
+}
+
+// Bool returns the bool value if token kind is Bool, else it panics.
+func (t Token) Bool() bool {
+	if t.kind == Bool {
+		return t.boo
+	}
+	panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString()))
+}
+
+// ParsedString returns the string value if the token kind is String, else it
+// panics.
+func (t Token) ParsedString() string {
+	if t.kind == String {
+		return t.str
+	}
+	panic(fmt.Sprintf("Token is not a String: %v", t.RawString()))
+}
+
+// Float returns the floating-point number if token kind is Number.
+//
+// The floating-point precision is specified by the bitSize parameter: 32 for
+// float32 or 64 for float64. If bitSize=32, the result still has type float64,
+// but it will be convertible to float32 without changing its value. It will
+// return false if the number exceeds the floating point limits for given
+// bitSize.
+func (t Token) Float(bitSize int) (float64, bool) {
+	if t.kind != Number {
+		return 0, false
+	}
+	f, err := strconv.ParseFloat(t.RawString(), bitSize)
+	if err != nil {
+		return 0, false
+	}
+	return f, true
+}
+
+// Int returns the signed integer number if token is Number.
+//
+// The given bitSize specifies the integer type that the result must fit into.
+// It returns false if the number is not an integer value or if the result
+// exceeds the limits for given bitSize.
+func (t Token) Int(bitSize int) (int64, bool) {
+	s, ok := t.getIntStr()
+	if !ok {
+		return 0, false
+	}
+	n, err := strconv.ParseInt(s, 10, bitSize)
+	if err != nil {
+		return 0, false
+	}
+	return n, true
+}
+
+// Uint returns the unsigned integer number if token is Number.
+//
+// The given bitSize specifies the unsigned integer type that the result must
+// fit into. It returns false if the number is not an unsigned integer value
+// or if the result exceeds the limits for given bitSize.
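+//
+// For example, a Number token holding 18446744073709551615 (the maximum
+// uint64) succeeds with Uint(64) but fails with Int(64), since the value only
+// fits in an unsigned 64-bit integer.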
+func (t Token) Uint(bitSize int) (uint64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseUint(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +func (t Token) getIntStr() (string, bool) { + if t.kind != Number { + return "", false + } + parts, ok := parseNumberParts(t.raw) + if !ok { + return "", false + } + return normalizeToIntString(parts) +} + +// TokenEquals returns true if given Tokens are equal, else false. +func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.boo == y.boo && + x.str == y.str +} diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go new file mode 100644 index 000000000..fbdf34873 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go @@ -0,0 +1,276 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// kind represents an encoding type. +type kind uint8 + +const ( + _ kind = (1 << iota) / 2 + name + scalar + objectOpen + objectClose + arrayOpen + arrayClose +) + +// Encoder provides methods to write out JSON constructs and values. The user is +// responsible for producing valid sequences of JSON constructs and values. +type Encoder struct { + indent string + lastKind kind + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry for an Array or Object +// to be preceded by the indent and trailed by a newline. +func NewEncoder(indent string) (*Encoder, error) { + e := &Encoder{} + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space or tab characters") + } + e.indent = indent + } + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// WriteNull writes out the null value. +func (e *Encoder) WriteNull() { + e.prepareNext(scalar) + e.out = append(e.out, "null"...) +} + +// WriteBool writes out the given boolean value. +func (e *Encoder) WriteBool(b bool) { + e.prepareNext(scalar) + if b { + e.out = append(e.out, "true"...) + } else { + e.out = append(e.out, "false"...) + } +} + +// WriteString writes out the given string in JSON string value. Returns error +// if input string contains invalid UTF-8. +func (e *Encoder) WriteString(s string) error { + e.prepareNext(scalar) + var err error + if e.out, err = appendString(e.out, s); err != nil { + return err + } + return nil +} + +// Sentinel error used for indicating invalid UTF-8. +var errInvalidUTF8 = errors.New("invalid UTF-8") + +func appendString(out []byte, in string) ([]byte, error) { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) 
+ for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + return out, errInvalidUTF8 + case r < ' ' || r == '"' || r == '\\': + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\b': + out = append(out, 'b') + case '\f': + out = append(out, 'f') + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out, nil +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i, r := range s { + if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float and bitSize in JSON number value. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +// appendFloat formats given float in bitSize, and appends to the given []byte. +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, `"NaN"`...) + case math.IsInf(n, +1): + return append(out, `"Infinity"`...) + case math.IsInf(n, -1): + return append(out, `"-Infinity"`...) + } + + // JSON number formatting logic based on encoding/json. + // See floatEncoder.encode for reference. + fmt := byte('f') + if abs := math.Abs(n); abs != 0 { + if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || + bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + out = strconv.AppendFloat(out, n, fmt, -1, bitSize) + if fmt == 'e' { + n := len(out) + if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' { + out[n-2] = out[n-1] + out = out[:n-1] + } + } + return out +} + +// WriteInt writes out the given signed integer in JSON number value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatInt(n, 10)...) +} + +// WriteUint writes out the given unsigned integer in JSON number value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatUint(n, 10)...) +} + +// StartObject writes out the '{' symbol. +func (e *Encoder) StartObject() { + e.prepareNext(objectOpen) + e.out = append(e.out, '{') +} + +// EndObject writes out the '}' symbol. +func (e *Encoder) EndObject() { + e.prepareNext(objectClose) + e.out = append(e.out, '}') +} + +// WriteName writes out the given string in JSON string value and the name +// separator ':'. Returns error if input string contains invalid UTF-8, which +// should not be likely as protobuf field names should be valid. +func (e *Encoder) WriteName(s string) error { + e.prepareNext(name) + var err error + // Append to output regardless of error. + e.out, err = appendString(e.out, s) + e.out = append(e.out, ':') + return err +} + +// StartArray writes out the '[' symbol. +func (e *Encoder) StartArray() { + e.prepareNext(arrayOpen) + e.out = append(e.out, '[') +} + +// EndArray writes out the ']' symbol. 
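The special cases in appendFloat above matter because JSON has no literals for NaN or the infinities; protobuf's JSON form encodes them as quoted strings, and the exponent cleanup trims the leading zero that strconv emits ("1e-07" becomes "1e-7"). A minimal standalone sketch of those rules, simplified to the 64-bit case and assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"math"
	"strconv"
)

func format(n float64) string {
	switch {
	case math.IsNaN(n):
		return `"NaN"`
	case math.IsInf(n, +1):
		return `"Infinity"`
	case math.IsInf(n, -1):
		return `"-Infinity"`
	}
	// Switch to exponent form for very small or very large magnitudes.
	f := byte('f')
	if abs := math.Abs(n); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
		f = 'e'
	}
	out := strconv.AppendFloat(nil, n, f, -1, 64)
	// Trim a leading zero in a negative exponent: "1e-07" -> "1e-7".
	if f == 'e' {
		if i := len(out); i >= 4 && out[i-4] == 'e' && out[i-3] == '-' && out[i-2] == '0' {
			out[i-2] = out[i-1]
			out = out[:i-1]
		}
	}
	return string(out)
}

func main() {
	fmt.Println(format(math.NaN()))  // "NaN"
	fmt.Println(format(math.Inf(1))) // "Infinity"
	fmt.Println(format(1e-7))        // 1e-7
	fmt.Println(format(3.14))        // 3.14
}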
+func (e *Encoder) EndArray() { + e.prepareNext(arrayClose) + e.out = append(e.out, ']') +} + +// prepareNext adds possible comma and indentation for the next value based +// on last type and indent option. It also updates lastKind to next. +func (e *Encoder) prepareNext(next kind) { + defer func() { + // Set lastKind to next. + e.lastKind = next + }() + + if len(e.indent) == 0 { + // Need to add comma on the following condition. + if e.lastKind&(scalar|objectClose|arrayClose) != 0 && + next&(name|scalar|objectOpen|arrayOpen) != 0 { + e.out = append(e.out, ',') + // For single-line output, add a random extra space after each + // comma to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + switch { + case e.lastKind&(objectOpen|arrayOpen) != 0: + // If next type is NOT closing, add indent and newline. + if next&(objectClose|arrayClose) == 0 { + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } + + case e.lastKind&(scalar|objectClose|arrayClose) != 0: + switch { + // If next type is either a value or name, add comma and newline. + case next&(name|scalar|objectOpen|arrayOpen) != 0: + e.out = append(e.out, ',', '\n') + + // If next type is a closing object or array, adjust indentation. + case next&(objectClose|arrayClose) != 0: + e.indents = e.indents[:len(e.indents)-len(e.indent)] + e.out = append(e.out, '\n') + } + e.out = append(e.out, e.indents...) + + case e.lastKind&name != 0: + e.out = append(e.out, ' ') + // For multi-line output, add a random extra space after key: to make + // output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } +} diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 427c62d03..87853e786 100644 --- a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -412,12 +412,13 @@ func (d *Decoder) parseFieldName() (tok Token, err error) { // Field number. Identify if input is a valid number that is not negative // and is decimal integer within 32-bit range. 
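The check just below accepts a numeric field name only when it is a non-negative decimal that fits in 32 bits. A tiny standalone sketch of that rule:

package main

import (
	"fmt"
	"strconv"
)

// validFieldNumber mirrors the decoder's rule: a decimal literal that is
// not negative and fits in 32 bits. (The protobuf spec further requires
// field numbers to start at 1; that is enforced elsewhere.)
func validFieldNumber(s string) bool {
	n, err := strconv.ParseInt(s, 10, 32)
	return err == nil && n >= 0
}

func main() {
	fmt.Println(validFieldNumber("12"))         // true
	fmt.Println(validFieldNumber("-3"))         // false
	fmt.Println(validFieldNumber("2147483648")) // false: exceeds int32
}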
if num := parseNumber(d.in); num.size > 0 { + str := num.string(d.in) if !num.neg && num.kind == numDec { - if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil { + if _, err := strconv.ParseInt(str, 10, 32); err == nil { return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil } } - return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) + return Token{}, d.newSyntaxError("invalid field number: %s", str) } return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in)) diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go index 81a5d8c86..45c81f029 100644 --- a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go @@ -15,17 +15,12 @@ func (d *Decoder) parseNumberValue() (Token, bool) { if num.neg { numAttrs |= isNegative } - strSize := num.size - last := num.size - 1 - if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') { - strSize = last - } tok := Token{ kind: Scalar, attrs: numberValue, pos: len(d.orig) - len(d.in), raw: d.in[:num.size], - str: string(d.in[:strSize]), + str: num.string(d.in), numAttrs: numAttrs, } d.consume(num.size) @@ -46,6 +41,27 @@ type number struct { kind uint8 neg bool size int + // if neg, this is the length of whitespace and comments between + // the minus sign and the rest fo the number literal + sep int +} + +func (num number) string(data []byte) string { + strSize := num.size + last := num.size - 1 + if num.kind == numFloat && (data[last] == 'f' || data[last] == 'F') { + strSize = last + } + if num.neg && num.sep > 0 { + // strip whitespace/comments between negative sign and the rest + strLen := strSize - num.sep + str := make([]byte, strLen) + str[0] = data[0] + copy(str[1:], data[num.sep+1:strSize]) + return string(str) + } + return string(data[:strSize]) + } // parseNumber constructs a number object from given input. It allows for the @@ -67,19 +83,22 @@ func parseNumber(input []byte) number { } // Optional - + var sep int if s[0] == '-' { neg = true s = s[1:] size++ + // Consume any whitespace or comments between the + // negative sign and the rest of the number + lenBefore := len(s) + s = consume(s, 0) + sep = lenBefore - len(s) + size += sep if len(s) == 0 { return number{} } } - // C++ allows for whitespace and comments in between the negative sign and - // the rest of the number. This logic currently does not but is consistent - // with v1. 
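The sep bookkeeping introduced above exists because C++ textproto permits whitespace and comments between the minus sign and the digits; num.string drops that separator run when materializing the literal, keeping the sign byte and copying everything after the separator. A minimal sketch of the normalization, assuming a plain run of spaces as the separator:

package main

import "fmt"

// normalizeNeg reproduces the num.string logic for a negative literal:
// data[0] is the '-' sign, sep counts the separator bytes after it, and
// size is the total length of the literal including the separator.
func normalizeNeg(data []byte, sep, size int) string {
	out := make([]byte, size-sep)
	out[0] = data[0] // keep the '-' sign
	copy(out[1:], data[sep+1:size])
	return string(out)
}

func main() {
	in := []byte("-  1.5")
	fmt.Println(normalizeNeg(in, 2, len(in))) // -1.5
}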
- switch { case s[0] == '0': if len(s) > 1 { @@ -116,7 +135,7 @@ func parseNumber(input []byte) number { if len(s) > 0 && !isDelim(s[0]) { return number{} } - return number{kind: kind, neg: neg, size: size} + return number{kind: kind, neg: neg, size: size, sep: sep} } } s = s[1:] @@ -188,5 +207,5 @@ func parseNumber(input []byte) number { return number{} } - return number{kind: kind, neg: neg, size: size} + return number{kind: kind, neg: neg, size: size, sep: sep} } diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index e3cdf1c20..5c0e8f73f 100644 --- a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -50,6 +50,7 @@ const ( FileDescriptorProto_Options_field_name protoreflect.Name = "options" FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" + FileDescriptorProto_Edition_field_name protoreflect.Name = "edition" FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" @@ -63,6 +64,7 @@ const ( FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" + FileDescriptorProto_Edition_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.edition" ) // Field numbers for google.protobuf.FileDescriptorProto. @@ -79,6 +81,7 @@ const ( FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 + FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13 ) // Names for google.protobuf.DescriptorProto. @@ -494,26 +497,29 @@ const ( // Field names for google.protobuf.MessageOptions. 
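These generated genid constants let the rest of the module refer to descriptor.proto fields by protoreflect.Name and FieldNumber rather than ad-hoc string literals. A small sketch of the correspondence, assuming the bumped protobuf module (which adds the edition field in this very patch) is on the import path:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	md := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor()
	fd := md.Fields().ByName("edition")
	fmt.Println(fd.Number()) // 13, matching the new Edition field number
}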
const ( - MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" - MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" - MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" - MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" - MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" - MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" - MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" - MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" - MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" + MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" + MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" + MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ) // Field numbers for google.protobuf.MessageOptions. const ( - MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 - MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 - MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 - MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 - MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 + MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 + MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 + MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) // Names for google.protobuf.FieldOptions. 
@@ -528,16 +534,24 @@ const ( FieldOptions_Packed_field_name protoreflect.Name = "packed" FieldOptions_Jstype_field_name protoreflect.Name = "jstype" FieldOptions_Lazy_field_name protoreflect.Name = "lazy" + FieldOptions_UnverifiedLazy_field_name protoreflect.Name = "unverified_lazy" FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" FieldOptions_Weak_field_name protoreflect.Name = "weak" + FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + FieldOptions_Retention_field_name protoreflect.Name = "retention" + FieldOptions_Target_field_name protoreflect.Name = "target" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" + FieldOptions_UnverifiedLazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.unverified_lazy" FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" + FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" + FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" + FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -547,8 +561,12 @@ const ( FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 + FieldOptions_UnverifiedLazy_field_number protoreflect.FieldNumber = 15 FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 + FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 + FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 + FieldOptions_Target_field_number protoreflect.FieldNumber = 18 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -564,6 +582,18 @@ const ( FieldOptions_JSType_enum_name = "JSType" ) +// Full and short names for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" + FieldOptions_OptionRetention_enum_name = "OptionRetention" +) + +// Full and short names for google.protobuf.FieldOptions.OptionTargetType. +const ( + FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" + FieldOptions_OptionTargetType_enum_name = "OptionTargetType" +) + // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -590,20 +620,23 @@ const ( // Field names for google.protobuf.EnumOptions. 
const ( - EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" - EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" - EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" + EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" - EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" - EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" + EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" + EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" + EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumOptions. const ( - EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 - EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 - EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 + EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 + EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) // Names for google.protobuf.EnumValueOptions. @@ -813,11 +846,13 @@ const ( GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" + GeneratedCodeInfo_Annotation_Semantic_field_name protoreflect.Name = "semantic" GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" + GeneratedCodeInfo_Annotation_Semantic_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.semantic" ) // Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. @@ -826,4 +861,11 @@ const ( GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 + GeneratedCodeInfo_Annotation_Semantic_field_number protoreflect.FieldNumber = 5 +) + +// Full and short names for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. 
+const ( + GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" + GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/impl/convert.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/impl/convert.go index 11a6128ba..185ef2efa 100644 --- a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -59,7 +59,6 @@ func NewConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { default: return newSingularConverter(t, fd) } - panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) } var ( diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index fea589c45..61a84d341 100644 --- a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -87,7 +87,7 @@ func (sb *Builder) grow(n int) { // Unlike strings.Builder, we do not need to copy over the contents // of the old buffer since our builder provides no API for // retrieving previously created strings. - sb.buf = make([]byte, 2*(cap(sb.buf)+n)) + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) } func (sb *Builder) last(n int) string { diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/version/version.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/version/version.go index b480c5010..f7014cd51 100644 --- a/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 28 - Patch = 1 + Minor = 30 + Patch = 0 PreRelease = "" ) diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/proto/doc.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/proto/doc.go index 08d2a46f5..ec71e717f 100644 --- a/prometheus-to-sd/vendor/google.golang.org/protobuf/proto/doc.go +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/proto/doc.go @@ -5,16 +5,13 @@ // Package proto provides functions operating on protocol buffer messages. // // For documentation on protocol buffers in general, see: -// -// https://developers.google.com/protocol-buffers +// https://protobuf.dev. // // For a tutorial on using protocol buffers with Go, see: -// -// https://developers.google.com/protocol-buffers/docs/gotutorial +// https://protobuf.dev/getting-started/gotutorial. // // For a guide to generated Go protocol buffer code, see: -// -// https://developers.google.com/protocol-buffers/docs/reference/go-generated +// https://protobuf.dev/reference/go/go-generated. 
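The one-character fix in strs.Builder.grow above is easy to miss: the old code allocated the replacement buffer with *length* 2*(cap(sb.buf)+n) rather than capacity, so subsequent appends would land after a run of zero bytes instead of at the front. A two-line illustration of the difference:

package main

import "fmt"

func main() {
	bad := make([]byte, 4)     // len 4: four zero bytes already "written"
	bad = append(bad, 'a')     // "\x00\x00\x00\x00a"
	good := make([]byte, 0, 4) // len 0, cap 4: appends start at the front
	good = append(good, 'a')   // "a"
	fmt.Printf("%q %q\n", bad, good)
}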
// // # Binary serialization // diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/proto/equal.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/proto/equal.go index 67948dd1d..1a0be1b03 100644 --- a/prometheus-to-sd/vendor/google.golang.org/protobuf/proto/equal.go +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/proto/equal.go @@ -5,30 +5,39 @@ package proto import ( - "bytes" - "math" "reflect" - "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/reflect/protoreflect" ) -// Equal reports whether two messages are equal. -// If two messages marshal to the same bytes under deterministic serialization, -// then Equal is guaranteed to report true. +// Equal reports whether two messages are equal, +// by recursively comparing the fields of the message. // -// Two messages are equal if they belong to the same message descriptor, -// have the same set of populated known and extension field values, -// and the same set of unknown fields values. If either of the top-level -// messages are invalid, then Equal reports true only if both are invalid. +// - Bytes fields are equal if they contain identical bytes. +// Empty bytes (regardless of nil-ness) are considered equal. // -// Scalar values are compared with the equivalent of the == operator in Go, -// except bytes values which are compared using bytes.Equal and -// floating point values which specially treat NaNs as equal. -// Message values are compared by recursively calling Equal. -// Lists are equal if each element value is also equal. -// Maps are equal if they have the same set of keys, where the pair of values -// for each key is also equal. +// - Floating-point fields are equal if they contain the same value. +// Unlike the == operator, a NaN is equal to another NaN. +// +// - Other scalar fields are equal if they contain the same value. +// +// - Message fields are equal if they have +// the same set of populated known and extension field values, and +// the same set of unknown fields values. +// +// - Lists are equal if they are the same length and +// each corresponding element is equal. +// +// - Maps are equal if they have the same set of keys and +// the corresponding value for each key is equal. +// +// An invalid message is not equal to a valid message. +// An invalid message is only equal to another invalid message of the +// same type. An invalid message often corresponds to a nil pointer +// of the concrete message type. For example, (*pb.M)(nil) is not equal +// to &pb.M{}. +// If two valid messages marshal to the same bytes under deterministic +// serialization, then Equal is guaranteed to report true. func Equal(x, y Message) bool { if x == nil || y == nil { return x == nil && y == nil @@ -42,130 +51,7 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } - return equalMessage(mx, my) -} - -// equalMessage compares two messages. -func equalMessage(mx, my protoreflect.Message) bool { - if mx.Descriptor() != my.Descriptor() { - return false - } - - nx := 0 - equal := true - mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { - nx++ - vy := my.Get(fd) - equal = my.Has(fd) && equalField(fd, vx, vy) - return equal - }) - if !equal { - return false - } - ny := 0 - my.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { - ny++ - return true - }) - if nx != ny { - return false - } - - return equalUnknown(mx.GetUnknown(), my.GetUnknown()) -} - -// equalField compares two fields. 
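The rewritten doc comment above pins down the invalid-message corner case: a typed nil pointer is an invalid message and is only equal to another invalid message of the same type. A runnable sketch of those semantics, using descriptorpb.FileDescriptorProto purely as a stand-in message type:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	var invalid *descriptorpb.FileDescriptorProto // nil pointer: invalid message
	valid := &descriptorpb.FileDescriptorProto{}

	fmt.Println(proto.Equal(invalid, valid))   // false
	fmt.Println(proto.Equal(invalid, invalid)) // true: both invalid, same type

	a := &descriptorpb.FileDescriptorProto{Name: proto.String("a.proto")}
	b := &descriptorpb.FileDescriptorProto{Name: proto.String("a.proto")}
	fmt.Println(proto.Equal(a, b)) // true: same populated fields
}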
-func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { - switch { - case fd.IsList(): - return equalList(fd, x.List(), y.List()) - case fd.IsMap(): - return equalMap(fd, x.Map(), y.Map()) - default: - return equalValue(fd, x, y) - } -} - -// equalMap compares two maps. -func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool { - if x.Len() != y.Len() { - return false - } - equal := true - x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { - vy := y.Get(k) - equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) - return equal - }) - return equal -} - -// equalList compares two lists. -func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool { - if x.Len() != y.Len() { - return false - } - for i := x.Len() - 1; i >= 0; i-- { - if !equalValue(fd, x.Get(i), y.Get(i)) { - return false - } - } - return true -} - -// equalValue compares two singular values. -func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { - switch fd.Kind() { - case protoreflect.BoolKind: - return x.Bool() == y.Bool() - case protoreflect.EnumKind: - return x.Enum() == y.Enum() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, - protoreflect.Int64Kind, protoreflect.Sint64Kind, - protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: - return x.Int() == y.Int() - case protoreflect.Uint32Kind, protoreflect.Uint64Kind, - protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: - return x.Uint() == y.Uint() - case protoreflect.FloatKind, protoreflect.DoubleKind: - fx := x.Float() - fy := y.Float() - if math.IsNaN(fx) || math.IsNaN(fy) { - return math.IsNaN(fx) && math.IsNaN(fy) - } - return fx == fy - case protoreflect.StringKind: - return x.String() == y.String() - case protoreflect.BytesKind: - return bytes.Equal(x.Bytes(), y.Bytes()) - case protoreflect.MessageKind, protoreflect.GroupKind: - return equalMessage(x.Message(), y.Message()) - default: - return x.Interface() == y.Interface() - } -} - -// equalUnknown compares unknown fields by direct comparison on the raw bytes -// of each individual field number. -func equalUnknown(x, y protoreflect.RawFields) bool { - if len(x) != len(y) { - return false - } - if bytes.Equal([]byte(x), []byte(y)) { - return true - } - - mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) - my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) - for len(x) > 0 { - fnum, _, n := protowire.ConsumeField(x) - mx[fnum] = append(mx[fnum], x[:n]...) - x = x[n:] - } - for len(y) > 0 { - fnum, _, n := protowire.ConsumeField(y) - my[fnum] = append(my[fnum], y[:n]...) 
- y = y[n:] - } - return reflect.DeepEqual(mx, my) + vx := protoreflect.ValueOfMessage(mx) + vy := protoreflect.ValueOfMessage(my) + return vx.Equal(vy) } diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index b03c1223c..54ce326df 100644 --- a/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -35,6 +35,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) case 12: b = p.appendSingularField(b, "syntax", nil) + case 13: + b = p.appendSingularField(b, "edition", nil) } return b } @@ -236,6 +238,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 7: b = p.appendSingularField(b, "map_entry", nil) + case 11: + b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -279,6 +283,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { b = p.appendSingularField(b, "allow_alias", nil) case 3: b = p.appendSingularField(b, "deprecated", nil) + case 6: + b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -345,10 +351,18 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "jstype", nil) case 5: b = p.appendSingularField(b, "lazy", nil) + case 15: + b = p.appendSingularField(b, "unverified_lazy", nil) case 3: b = p.appendSingularField(b, "deprecated", nil) case 10: b = p.appendSingularField(b, "weak", nil) + case 16: + b = p.appendSingularField(b, "debug_redact", nil) + case 17: + b = p.appendSingularField(b, "retention", nil) + case 18: + b = p.appendSingularField(b, "target", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index f31981077..37601b781 100644 --- a/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -148,7 +148,7 @@ type Message interface { // be preserved in marshaling or other operations. IsValid() bool - // ProtoMethods returns optional fast-path implementions of various operations. + // ProtoMethods returns optional fast-path implementations of various operations. // This method may return nil. // // The returned methods type is identical to diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go new file mode 100644 index 000000000..591652541 --- /dev/null +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -0,0 +1,168 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
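The new source_gen.go cases above feed protoreflect.SourcePath rendering, which turns a numeric descriptor path into its dotted field-name form. A small sketch (the path values here are illustrative):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// Relative to FileDescriptorProto: 4 = message_type, then within
	// DescriptorProto: 2 = field; each repeated field takes an index.
	p := protoreflect.SourcePath{4, 0, 2, 0}
	fmt.Println(p) // .message_type[0].field[0]
}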
+ +package protoreflect + +import ( + "bytes" + "fmt" + "math" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" +) + +// Equal reports whether v1 and v2 are recursively equal. +// +// - Values of different types are always unequal. +// +// - Bytes values are equal if they contain identical bytes. +// Empty bytes (regardless of nil-ness) are considered equal. +// +// - Floating point values are equal if they contain the same value. +// Unlike the == operator, a NaN is equal to another NaN. +// +// - Enums are equal if they contain the same number. +// Since Value does not contain an enum descriptor, +// enum values do not consider the type of the enum. +// +// - Other scalar values are equal if they contain the same value. +// +// - Message values are equal if they belong to the same message descriptor, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// - Lists are equal if they are the same length and +// each corresponding element is equal. +// +// - Maps are equal if they have the same set of keys and +// the corresponding value for each key is equal. +func (v1 Value) Equal(v2 Value) bool { + return equalValue(v1, v2) +} + +func equalValue(x, y Value) bool { + eqType := x.typ == y.typ + switch x.typ { + case nilType: + return eqType + case boolType: + return eqType && x.Bool() == y.Bool() + case int32Type, int64Type: + return eqType && x.Int() == y.Int() + case uint32Type, uint64Type: + return eqType && x.Uint() == y.Uint() + case float32Type, float64Type: + return eqType && equalFloat(x.Float(), y.Float()) + case stringType: + return eqType && x.String() == y.String() + case bytesType: + return eqType && bytes.Equal(x.Bytes(), y.Bytes()) + case enumType: + return eqType && x.Enum() == y.Enum() + default: + switch x := x.Interface().(type) { + case Message: + y, ok := y.Interface().(Message) + return ok && equalMessage(x, y) + case List: + y, ok := y.Interface().(List) + return ok && equalList(x, y) + case Map: + y, ok := y.Interface().(Map) + return ok && equalMap(x, y) + default: + panic(fmt.Sprintf("unknown type: %T", x)) + } + } +} + +// equalFloat compares two floats, where NaNs are treated as equal. +func equalFloat(x, y float64) bool { + if math.IsNaN(x) || math.IsNaN(y) { + return math.IsNaN(x) && math.IsNaN(y) + } + return x == y +} + +// equalMessage compares two messages. +func equalMessage(mx, my Message) bool { + if mx.Descriptor() != my.Descriptor() { + return false + } + + nx := 0 + equal := true + mx.Range(func(fd FieldDescriptor, vx Value) bool { + nx++ + vy := my.Get(fd) + equal = my.Has(fd) && equalValue(vx, vy) + return equal + }) + if !equal { + return false + } + ny := 0 + my.Range(func(fd FieldDescriptor, vx Value) bool { + ny++ + return true + }) + if nx != ny { + return false + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +// equalList compares two lists. +func equalList(x, y List) bool { + if x.Len() != y.Len() { + return false + } + for i := x.Len() - 1; i >= 0; i-- { + if !equalValue(x.Get(i), y.Get(i)) { + return false + } + } + return true +} + +// equalMap compares two maps. +func equalMap(x, y Map) bool { + if x.Len() != y.Len() { + return false + } + equal := true + x.Range(func(k MapKey, vx Value) bool { + vy := y.Get(k) + equal = y.Has(k) && equalValue(vx, vy) + return equal + }) + return equal +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. 
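Value.Equal, added above, is what proto.Equal now delegates to. A runnable sketch of two of the documented rules, NaN handling and cross-type inequality:

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	x := protoreflect.ValueOfFloat64(math.NaN())
	y := protoreflect.ValueOfFloat64(math.NaN())
	fmt.Println(x.Equal(y))               // true: NaN equals NaN here
	fmt.Println(math.NaN() == math.NaN()) // false: Go's == on floats

	a := protoreflect.ValueOfInt32(1)
	b := protoreflect.ValueOfInt64(1)
	fmt.Println(a.Equal(b)) // false: values of different types are never equal
}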
+func equalUnknown(x, y RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[FieldNumber]RawFields) + my := make(map[FieldNumber]RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + return reflect.DeepEqual(mx, my) +} diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index ca8e28c5b..08e5ef73f 100644 --- a/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -54,11 +54,11 @@ import ( // // Append a 0 to a "repeated int32" field. // // Since the Value returned by Mutable is guaranteed to alias // // the source message, modifying the Value modifies the message. -// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0)) +// message.Mutable(fieldDesc).List().Append(protoreflect.ValueOfInt32(0)) // // // Assign [0] to a "repeated int32" field by creating a new Value, // // modifying it, and assigning it. -// list := message.NewField(fieldDesc).(List) +// list := message.NewField(fieldDesc).List() // list.Append(protoreflect.ValueOfInt32(0)) // message.Set(fieldDesc, list) // // ERROR: Since it is not defined whether Set aliases the source, diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 58352a697..aeb559774 100644 --- a/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -46,7 +46,7 @@ var conflictPolicy = "panic" // "panic" | "warn" | "ignore" // It is a variable so that the behavior is easily overridden in another file. var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" - const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict" + const faq = "https://protobuf.dev/reference/go/faq#namespace-conflict" policy := conflictPolicy if v := os.Getenv(env); v != "" { policy = v diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index abe4ab511..dac5671db 100644 --- a/prometheus-to-sd/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -406,6 +406,152 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} } +// If set to RETENTION_SOURCE, the option will be omitted from the binary. +// Note: as of January 2023, support for this is in progress and does not yet +// have an effect (b/264593489). 
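The value_union.go doc fix above replaces interface type assertions with the Value.List accessor. A sketch of the corrected pattern against a concrete message, using FileDescriptorProto's repeated dependency field as a stand-in:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	msg := (&descriptorpb.FileDescriptorProto{}).ProtoReflect()
	fd := msg.Descriptor().Fields().ByName("dependency")

	// Mutable returns a Value aliasing the message, so appending to the
	// list mutates the message in place.
	msg.Mutable(fd).List().Append(protoreflect.ValueOfString("a.proto"))
	fmt.Println(msg.Interface()) // prints roughly: dependency:"a.proto"
}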
+type FieldOptions_OptionRetention int32 + +const ( + FieldOptions_RETENTION_UNKNOWN FieldOptions_OptionRetention = 0 + FieldOptions_RETENTION_RUNTIME FieldOptions_OptionRetention = 1 + FieldOptions_RETENTION_SOURCE FieldOptions_OptionRetention = 2 +) + +// Enum value maps for FieldOptions_OptionRetention. +var ( + FieldOptions_OptionRetention_name = map[int32]string{ + 0: "RETENTION_UNKNOWN", + 1: "RETENTION_RUNTIME", + 2: "RETENTION_SOURCE", + } + FieldOptions_OptionRetention_value = map[string]int32{ + "RETENTION_UNKNOWN": 0, + "RETENTION_RUNTIME": 1, + "RETENTION_SOURCE": 2, + } +) + +func (x FieldOptions_OptionRetention) Enum() *FieldOptions_OptionRetention { + p := new(FieldOptions_OptionRetention) + *p = x + return p +} + +func (x FieldOptions_OptionRetention) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() +} + +func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[5] +} + +func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_OptionRetention) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_OptionRetention(num) + return nil +} + +// Deprecated: Use FieldOptions_OptionRetention.Descriptor instead. +func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 2} +} + +// This indicates the types of entities that the field may apply to when used +// as an option. If it is unset, then the field may be freely used as an +// option on any kind of entity. Note: as of January 2023, support for this is +// in progress and does not yet have an effect (b/264593489). +type FieldOptions_OptionTargetType int32 + +const ( + FieldOptions_TARGET_TYPE_UNKNOWN FieldOptions_OptionTargetType = 0 + FieldOptions_TARGET_TYPE_FILE FieldOptions_OptionTargetType = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE FieldOptions_OptionTargetType = 2 + FieldOptions_TARGET_TYPE_MESSAGE FieldOptions_OptionTargetType = 3 + FieldOptions_TARGET_TYPE_FIELD FieldOptions_OptionTargetType = 4 + FieldOptions_TARGET_TYPE_ONEOF FieldOptions_OptionTargetType = 5 + FieldOptions_TARGET_TYPE_ENUM FieldOptions_OptionTargetType = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY FieldOptions_OptionTargetType = 7 + FieldOptions_TARGET_TYPE_SERVICE FieldOptions_OptionTargetType = 8 + FieldOptions_TARGET_TYPE_METHOD FieldOptions_OptionTargetType = 9 +) + +// Enum value maps for FieldOptions_OptionTargetType. 
+var ( + FieldOptions_OptionTargetType_name = map[int32]string{ + 0: "TARGET_TYPE_UNKNOWN", + 1: "TARGET_TYPE_FILE", + 2: "TARGET_TYPE_EXTENSION_RANGE", + 3: "TARGET_TYPE_MESSAGE", + 4: "TARGET_TYPE_FIELD", + 5: "TARGET_TYPE_ONEOF", + 6: "TARGET_TYPE_ENUM", + 7: "TARGET_TYPE_ENUM_ENTRY", + 8: "TARGET_TYPE_SERVICE", + 9: "TARGET_TYPE_METHOD", + } + FieldOptions_OptionTargetType_value = map[string]int32{ + "TARGET_TYPE_UNKNOWN": 0, + "TARGET_TYPE_FILE": 1, + "TARGET_TYPE_EXTENSION_RANGE": 2, + "TARGET_TYPE_MESSAGE": 3, + "TARGET_TYPE_FIELD": 4, + "TARGET_TYPE_ONEOF": 5, + "TARGET_TYPE_ENUM": 6, + "TARGET_TYPE_ENUM_ENTRY": 7, + "TARGET_TYPE_SERVICE": 8, + "TARGET_TYPE_METHOD": 9, + } +) + +func (x FieldOptions_OptionTargetType) Enum() *FieldOptions_OptionTargetType { + p := new(FieldOptions_OptionTargetType) + *p = x + return p +} + +func (x FieldOptions_OptionTargetType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() +} + +func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[6] +} + +func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_OptionTargetType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_OptionTargetType(num) + return nil +} + +// Deprecated: Use FieldOptions_OptionTargetType.Descriptor instead. +func (FieldOptions_OptionTargetType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 3} +} + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, // or neither? HTTP based RPC implementation may choose GET verb for safe // methods, and PUT verb for idempotent methods instead of the default POST. @@ -442,11 +588,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -468,6 +614,70 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} } +// Represents the identified object's effect on the element in the original +// .proto file. +type GeneratedCodeInfo_Annotation_Semantic int32 + +const ( + // There is no effect or the effect is indescribable. + GeneratedCodeInfo_Annotation_NONE GeneratedCodeInfo_Annotation_Semantic = 0 + // The element is set or otherwise mutated. + GeneratedCodeInfo_Annotation_SET GeneratedCodeInfo_Annotation_Semantic = 1 + // An alias to the element is returned. + GeneratedCodeInfo_Annotation_ALIAS GeneratedCodeInfo_Annotation_Semantic = 2 +) + +// Enum value maps for GeneratedCodeInfo_Annotation_Semantic. 
+var ( + GeneratedCodeInfo_Annotation_Semantic_name = map[int32]string{ + 0: "NONE", + 1: "SET", + 2: "ALIAS", + } + GeneratedCodeInfo_Annotation_Semantic_value = map[string]int32{ + "NONE": 0, + "SET": 1, + "ALIAS": 2, + } +) + +func (x GeneratedCodeInfo_Annotation_Semantic) Enum() *GeneratedCodeInfo_Annotation_Semantic { + p := new(GeneratedCodeInfo_Annotation_Semantic) + *p = x + return p +} + +func (x GeneratedCodeInfo_Annotation_Semantic) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() +} + +func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[8] +} + +func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GeneratedCodeInfo_Annotation_Semantic(num) + return nil +} + +// Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. +func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0} +} + // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. type FileDescriptorSet struct { @@ -544,8 +754,12 @@ type FileDescriptorProto struct { // development tools. SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` // The syntax of the proto file. - // The supported values are "proto2" and "proto3". + // The supported values are "proto2", "proto3", and "editions". + // + // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + // The edition of the proto file, which is an opaque string. + Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"` } func (x *FileDescriptorProto) Reset() { @@ -664,6 +878,13 @@ func (x *FileDescriptorProto) GetSyntax() string { return "" } +func (x *FileDescriptorProto) GetEdition() string { + if x != nil && x.Edition != nil { + return *x.Edition + } + return "" +} + // Describes a message type. type DescriptorProto struct { state protoimpl.MessageState @@ -860,7 +1081,6 @@ type FieldDescriptorProto struct { // For booleans, "true" or "false". // For strings, contains the default text contents (not escaped in any way). // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` // If set, gives the index of a oneof in the containing type's oneof_decl // list. This field is a member of that oneof. @@ -1382,22 +1602,22 @@ type FileOptions struct { // inappropriate because proto packages do not normally start with backwards // domain names. JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. 
This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). + // Controls the name of the wrapper Java class generated for the .proto file. + // That class will always contain the .proto file's getDescriptor() method as + // well as any top-level extensions defined in the .proto file. + // If java_multiple_files is disabled, then all the other classes from the + // .proto file will be nested inside the single wrapper outer class. JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java + // If enabled, then the Java code generator will generate a separate .java // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be + // file. Thus, these types will *not* be nested inside the wrapper class + // named by java_outer_classname. However, the wrapper class will still be // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` // This option does nothing. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 @@ -1531,7 +1751,7 @@ func (x *FileOptions) GetJavaMultipleFiles() bool { return Default_FileOptions_JavaMultipleFiles } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { if x != nil && x.JavaGenerateEqualsAndHash != nil { return *x.JavaGenerateEqualsAndHash @@ -1670,10 +1890,12 @@ type MessageOptions struct { // efficient, has fewer features, and is more complicated. // // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } + // + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // // Note that the message cannot have any defined fields; MessageSets only // have extensions. // @@ -1692,28 +1914,44 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + // // Whether the message is an automatically generated map entry type for the // maps field. 
// // For maps fields: - // map map_field = 1; + // + // map map_field = 1; + // // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; + // + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; // // Implementations may choose not to generate the map_entry=true message, but // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // Enable the legacy handling of JSON field name conflicts. This lowercases + // and strips underscored from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // + // This should only be used as a temporary measure against broken builds due + // to the change in behavior for JSON field name conflicts. + // + // TODO(b/261750190) This is legacy behavior we plan to remove once downstream + // teams have had time to migrate. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -1785,6 +2023,14 @@ func (x *MessageOptions) GetMapEntry() bool { return false } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { + if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { + return *x.DeprecatedLegacyJsonFieldConflicts + } + return false +} + func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -1838,7 +2084,6 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // // Note that implementations may choose not to check required fields within // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. @@ -1849,7 +2094,14 @@ type FieldOptions struct { // implementation must either *always* check its required fields, or *never* // check its required fields, regardless of whether or not the message has // been parsed. + // + // As of May 2022, lazy verifies the contents of the byte stream during + // parsing. An invalid byte stream will cause the overall parsing to fail. Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // unverified_lazy does no correctness checks on the byte stream. This should + // only be used where lazy with verification is prohibitive for performance + // reasons. 
+ UnverifiedLazy *bool `protobuf:"varint,15,opt,name=unverified_lazy,json=unverifiedLazy,def=0" json:"unverified_lazy,omitempty"` // Is this field deprecated? // Depending on the target platform, this can emit Deprecated annotations // for accessors, or it will be completely ignored; in the very least, this @@ -1857,17 +2109,24 @@ type FieldOptions struct { Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // For Google-internal migration only. Do not use. Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // Indicate that the field value should not be printed out when using debug + // formats, e.g. when the field contains sensitive credentials. + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for FieldOptions fields. const ( - Default_FieldOptions_Ctype = FieldOptions_STRING - Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL - Default_FieldOptions_Lazy = bool(false) - Default_FieldOptions_Deprecated = bool(false) - Default_FieldOptions_Weak = bool(false) + Default_FieldOptions_Ctype = FieldOptions_STRING + Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL + Default_FieldOptions_Lazy = bool(false) + Default_FieldOptions_UnverifiedLazy = bool(false) + Default_FieldOptions_Deprecated = bool(false) + Default_FieldOptions_Weak = bool(false) + Default_FieldOptions_DebugRedact = bool(false) ) func (x *FieldOptions) Reset() { @@ -1930,6 +2189,13 @@ func (x *FieldOptions) GetLazy() bool { return Default_FieldOptions_Lazy } +func (x *FieldOptions) GetUnverifiedLazy() bool { + if x != nil && x.UnverifiedLazy != nil { + return *x.UnverifiedLazy + } + return Default_FieldOptions_UnverifiedLazy +} + func (x *FieldOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -1944,6 +2210,27 @@ func (x *FieldOptions) GetWeak() bool { return Default_FieldOptions_Weak } +func (x *FieldOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_FieldOptions_DebugRedact +} + +func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { + if x != nil && x.Retention != nil { + return *x.Retention + } + return FieldOptions_RETENTION_UNKNOWN +} + +func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { + if x != nil && x.Target != nil { + return *x.Target + } + return FieldOptions_TARGET_TYPE_UNKNOWN +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2014,6 +2301,15 @@ type EnumOptions struct { // for the enum, or it will be completely ignored; in the very least, this // is a formalization for deprecating enums. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enable the legacy handling of JSON field name conflicts. 
This lowercases
+ // and strips underscores from the fields before comparison in proto3 only.
+ // The new behavior takes `json_name` into account and applies to proto2 as
+ // well.
+ // TODO(b/261750190) Remove this legacy behavior once downstream teams have
+ // had time to migrate.
+ //
+ // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
+ DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
@@ -2069,6 +2365,14 @@ func (x *EnumOptions) GetDeprecated() bool {
return Default_EnumOptions_Deprecated
}
+// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
+func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool {
+ if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil {
+ return *x.DeprecatedLegacyJsonFieldConflicts
+ }
+ return false
+}
+
func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2399,43 +2703,48 @@ type SourceCodeInfo struct {
// tools.
//
// For example, say we have a file like:
- // message Foo {
- // optional string foo = 1;
- // }
+ //
+ // message Foo {
+ // optional string foo = 1;
+ // }
+ //
// Let's look at just the field definition:
- // optional string foo = 1;
- // ^ ^^ ^^ ^ ^^^
- // a bc de f ghi
+ //
+ // optional string foo = 1;
+ // ^ ^^ ^^ ^ ^^^
+ // a bc de f ghi
+ //
// We have the following locations:
- // span path represents
- // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
- // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
- // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
- // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
- // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
+ //
+ // span path represents
+ // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
+ // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
+ // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
+ // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
+ // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
//
// Notes:
- // - A location may refer to a repeated field itself (i.e. not to any
- // particular index within it). This is used whenever a set of elements are
- // logically enclosed in a single code segment. For example, an entire
- // extend block (possibly containing multiple extension definitions) will
- // have an outer location whose path refers to the "extensions" repeated
- // field without an index.
- // - Multiple locations may have the same path. This happens when a single
- // logical declaration is spread out across multiple places. The most
- // obvious example is the "extend" block again -- there may be multiple
- // extend blocks in the same scope, each of which will have the same path.
- // - A location's span is not always a subset of its parent's span. For
- // example, the "extendee" of an extension declaration appears at the
- // beginning of the "extend" block and is shared by all extensions within
- // the block.
- // - Just because a location's span is a subset of some other location's span
- // does not mean that it is a descendant. For example, a "group" defines
Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` } @@ -2715,8 +3024,8 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". +// E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents +// "foo.(bar.baz).moo". type UninterpretedOption_NamePart struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2781,23 +3090,34 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] + // the root FileDescriptorProto to the place where the definition occurs. + // For example, this path: + // + // [ 4, 3, 2, 7, 1 ] + // // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 + // + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; + // + // repeated DescriptorProto message_type = 4; + // // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; + // + // repeated FieldDescriptorProto field = 2; + // // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; + // + // optional string name = 1; // // Thus, the above path gives the location of a field name. 
If we removed // the last element: - // [ 4, 3, 2, 7 ] + // + // [ 4, 3, 2, 7 ] + // // this path refers to the whole field declaration (from the beginning // of the label to the terminating semicolon). Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` @@ -2826,34 +3146,34 @@ type SourceCodeInfo_Location struct { // // Examples: // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; + // // Comment attached to moo. + // // + // // Another line attached to moo. + // optional double moo = 4; // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. + // // Detached comment for corge. This is not leading or trailing comments + // // to moo or corge because there are blank lines separating it from + // // both. // - // // Detached comment for corge paragraph 2. + // // Detached comment for corge paragraph 2. // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; // - // // ignored detached comments. + // // ignored detached comments. LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` @@ -2940,9 +3260,10 @@ type GeneratedCodeInfo_Annotation struct { // that relates to the identified object. Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past + // relates to the identified object. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). 
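// Editor's sketch (illustrative only, not part of the vendored file): the two
// comment blocks above can be exercised directly. Assuming a populated
// *descriptorpb.FileDescriptorProto fd, an annotation ann, and the generated
// file's bytes in generated (all hypothetical names):
//
//	// Path [4, 3, 2, 7, 1] resolves as file.message_type(3).field(7).name():
//	name := fd.GetMessageType()[3].GetField()[7].GetName()
//
//	// end is one past the last relevant byte, so the annotated text is:
//	text := generated[ann.GetBegin():ann.GetEnd()]
//	_, _ = name, text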
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` } func (x *GeneratedCodeInfo_Annotation) Reset() { @@ -3005,6 +3326,13 @@ func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { return 0 } +func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotation_Semantic { + if x != nil && x.Semantic != nil { + return *x.Semantic + } + return GeneratedCodeInfo_Annotation_NONE +} + var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor var file_google_protobuf_descriptor_proto_rawDesc = []byte{ @@ -3016,7 +3344,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -3054,330 +3382,391 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, - 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, - 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 
0x6e, 0x75, 0x6d, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, + 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, - 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 
0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, - 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, - 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, - 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, - 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, - 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, - 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, - 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, - 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 
0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, - 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, - 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, - 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, - 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, - 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, - 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, - 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 
0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, - 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 
0x67, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, - 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, - 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, - 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, - 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, - 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, - 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, - 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, - 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 
0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, + 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, + 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, + 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, + 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, + 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, + 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, + 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 
0x34, 0x10, 0x04, 0x12, 0x0e, + 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, + 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, + 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, + 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, + 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, + 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, + 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, + 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, + 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, + 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, + 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, + 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, + 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, + 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 
0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, + 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, + 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, + 0x70, 0x75, 0x74, 
0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, + 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, + 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, + 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, + 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, + 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, + 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, + 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, + 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, + 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 
0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, - 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, - 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, - 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, - 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, - 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, - 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, - 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, - 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 
0x54, 0x49, 0x4d, 0x45, 0x10, - 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, - 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, - 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, - 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, - 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, + 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, + 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, + 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, + 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x0f, + 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, + 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, + 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, + 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, + 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, + 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, - 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, + 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, + 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, + 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, 0x03, 0x0a, + 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, + 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x57, 
0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, + 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, + 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xb7, 0x08, 0x0a, 0x0c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, + 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, - 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, - 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 
0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, - 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, - 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, - 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, - 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, - 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, - 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, - 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, - 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, - 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, - 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 
0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x12, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, - 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, - 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, + 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 
0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, + 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, + 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 
0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, @@ -3385,97 +3774,95 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 
0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, + 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, + 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, + 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, + 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, + 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, + 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, - 0x69, 0x64, 
0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, - 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, - 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, - 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, - 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, - 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, - 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, - 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 
0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, - 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, - 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, - 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, - 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01, - 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, - 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, - 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, - 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, + 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, + 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, + 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 
0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, + 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, + 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, + 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, + 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, + 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, + 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, + 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, @@ -3498,7 +3885,7 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 
9) var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type @@ -3506,84 +3893,90 @@ var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType - (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel - (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 10: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 16: google.protobuf.FileOptions - (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 20: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange - (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation + (FieldOptions_OptionRetention)(0), // 5: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 6: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 7: google.protobuf.MethodOptions.IdempotencyLevel + (GeneratedCodeInfo_Annotation_Semantic)(0), // 8: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 9: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 10: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 11: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 12: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 13: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 14: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 15: google.protobuf.EnumDescriptorProto + 
(*EnumValueDescriptorProto)(nil), // 16: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 17: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 18: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 19: google.protobuf.FileOptions + (*MessageOptions)(nil), // 20: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 21: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 22: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 23: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 24: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 25: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 26: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 27: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 28: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 29: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 30: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 31: google.protobuf.DescriptorProto.ReservedRange + (*EnumDescriptorProto_EnumReservedRange)(nil), // 32: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 33: google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 34: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 35: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 10, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 10, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 11, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 15, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> 
google.protobuf.EnumDescriptorProto + 17, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 13, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 19, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 28, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 13, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 13, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 11, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 15, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 30, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 14, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 20, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 31, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 27, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 21, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 15, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 21, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 22, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 16, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 23, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 32, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 24, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 18, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 25, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 26, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> 
google.protobuf.MethodOptions 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 24, // 32: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 43, // [43:43] is the sub-list for method output_type - 43, // [43:43] is the sub-list for method input_type - 43, // [43:43] is the sub-list for extension type_name - 43, // [43:43] is the sub-list for extension extendee - 0, // [0:43] is the sub-list for field type_name + 5, // 32: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 6, // 33: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType + 27, // 34: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 35: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 36: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 37: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 38: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 7, // 39: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 27, // 40: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 33, // 41: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 34, // 42: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 35, // 43: 
google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 12, // 44: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 8, // 45: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 46, // [46:46] is the sub-list for method output_type + 46, // [46:46] is the sub-list for method input_type + 46, // [46:46] is the sub-list for extension type_name + 46, // [46:46] is the sub-list for extension extendee + 0, // [0:46] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -3940,7 +4333,7 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 6, + NumEnums: 9, NumMessages: 27, NumExtensions: 0, NumServices: 0, diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 8c10797b9..a6c7a33f3 100644 --- a/prometheus-to-sd/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -37,8 +37,7 @@ // It is functionally a tuple of the full name of the remote message type and // the serialized bytes of the remote message value. // -// -// Constructing an Any +// # Constructing an Any // // An Any message containing another message value is constructed using New: // @@ -48,8 +47,7 @@ // } // ... // make use of any // -// -// Unmarshaling an Any +// # Unmarshaling an Any // // With a populated Any message, the underlying message can be serialized into // a remote concrete message value in a few ways. @@ -95,8 +93,7 @@ // listed in the case clauses are linked into the Go binary and therefore also // registered in the global registry. // -// -// Type checking an Any +// # Type checking an Any // // In order to type check whether an Any message represents some other message, // then use the MessageIs method: @@ -115,7 +112,6 @@ // } // ... // make use of m // } -// package anypb import ( @@ -136,45 +132,49 @@ import ( // // Example 1: Pack and unpack a message in C++. // -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } // // Example 2: Pack and unpack a message in Java. // -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := anypb.New(foo) -// if err != nil { -// ... -// } -// ... -// foo := &pb.Foo{} -// if err := any.UnmarshalTo(foo); err != nil { -// ... -// } +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// // or ... +// if (any.isSameTypeAs(Foo.getDefaultInstance())) { +// foo = any.unpack(Foo.getDefaultInstance()); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... 
@@ -136,45 +132,49 @@
 //
 // Example 1: Pack and unpack a message in C++.
 //
-//     Foo foo = ...;
-//     Any any;
-//     any.PackFrom(foo);
-//     ...
-//     if (any.UnpackTo(&foo)) {
-//       ...
-//     }
+//	Foo foo = ...;
+//	Any any;
+//	any.PackFrom(foo);
+//	...
+//	if (any.UnpackTo(&foo)) {
+//	  ...
+//	}
 //
 // Example 2: Pack and unpack a message in Java.
 //
-//     Foo foo = ...;
-//     Any any = Any.pack(foo);
-//     ...
-//     if (any.is(Foo.class)) {
-//       foo = any.unpack(Foo.class);
-//     }
-//
-// Example 3: Pack and unpack a message in Python.
-//
-//     foo = Foo(...)
-//     any = Any()
-//     any.Pack(foo)
-//     ...
-//     if any.Is(Foo.DESCRIPTOR):
-//       any.Unpack(foo)
-//     ...
-//
-// Example 4: Pack and unpack a message in Go
-//
-//     foo := &pb.Foo{...}
-//     any, err := anypb.New(foo)
-//     if err != nil {
-//       ...
-//     }
-//     ...
-//     foo := &pb.Foo{}
-//     if err := any.UnmarshalTo(foo); err != nil {
-//       ...
-//     }
+//	Foo foo = ...;
+//	Any any = Any.pack(foo);
+//	...
+//	if (any.is(Foo.class)) {
+//	  foo = any.unpack(Foo.class);
+//	}
+//	// or ...
+//	if (any.isSameTypeAs(Foo.getDefaultInstance())) {
+//	  foo = any.unpack(Foo.getDefaultInstance());
+//	}
+//
+// Example 3: Pack and unpack a message in Python.
+//
+//	foo = Foo(...)
+//	any = Any()
+//	any.Pack(foo)
+//	...
+//	if any.Is(Foo.DESCRIPTOR):
+//	  any.Unpack(foo)
+//	...
+//
+// Example 4: Pack and unpack a message in Go
+//
+//	foo := &pb.Foo{...}
+//	any, err := anypb.New(foo)
+//	if err != nil {
+//	  ...
+//	}
+//	...
+//	foo := &pb.Foo{}
+//	if err := any.UnmarshalTo(foo); err != nil {
+//	  ...
+//	}
 //
 // The pack methods provided by protobuf library will by default use
 // 'type.googleapis.com/full.type.name' as the type URL and the unpack
@@ -182,35 +182,33 @@
 // methods only use the fully qualified type name after the last '/'
 // in the type URL, for example "foo.bar.com/x/y.z" will yield type
 // name "y.z".
 //
+// # JSON
 //
-// JSON
-// ====
 // The JSON representation of an `Any` value uses the regular
 // representation of the deserialized, embedded message, with an
 // additional field `@type` which contains the type URL. Example:
 //
-//     package google.profile;
-//     message Person {
-//       string first_name = 1;
-//       string last_name = 2;
-//     }
+//	package google.profile;
+//	message Person {
+//	  string first_name = 1;
+//	  string last_name = 2;
+//	}
 //
-//     {
-//       "@type": "type.googleapis.com/google.profile.Person",
-//       "firstName": <string>,
-//       "lastName": <string>
-//     }
+//	{
+//	  "@type": "type.googleapis.com/google.profile.Person",
+//	  "firstName": <string>,
+//	  "lastName": <string>
+//	}
 //
 // If the embedded message type is well-known and has a custom JSON
 // representation, that representation will be embedded adding a field
 // `value` which holds the custom JSON in addition to the `@type`
 // field. Example (for message [google.protobuf.Duration][]):
 //
-//     {
-//       "@type": "type.googleapis.com/google.protobuf.Duration",
-//       "value": "1.212s"
-//     }
-//
+//	{
+//	  "@type": "type.googleapis.com/google.protobuf.Duration",
+//	  "value": "1.212s"
+//	}
 type Any struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
@@ -228,14 +226,14 @@ type Any struct {
 	// scheme `http`, `https`, or no scheme, one can optionally set up a type
 	// server that maps type URLs to message definitions as follows:
 	//
-	// * If no scheme is provided, `https` is assumed.
-	// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
-	//   value in binary format, or produce an error.
-	// * Applications are allowed to cache lookup results based on the
-	//   URL, or have them precompiled into a binary to avoid any
-	//   lookup. Therefore, binary compatibility needs to be preserved
-	//   on changes to types. (Use versioned type names to manage
-	//   breaking changes.)
+	//   - If no scheme is provided, `https` is assumed.
+	//   - An HTTP GET on the URL must yield a [google.protobuf.Type][]
+	//     value in binary format, or produce an error.
+	//   - Applications are allowed to cache lookup results based on the
+	//     URL, or have them precompiled into a binary to avoid any
+	//     lookup. Therefore, binary compatibility needs to be preserved
+	//     on changes to types. (Use versioned type names to manage
+	//     breaking changes.)
 	//
 	// Note: this functionality is not currently available in the official
 	// protobuf release, and it is not used for type URLs beginning with
@@ -243,7 +241,6 @@ type Any struct {
 	//
 	// Schemes other than `http`, `https` (or the empty scheme) might be
 	// used with implementation specific semantics.
-	//
 	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
 	// Must be a valid serialized protocol buffer of the above specified type.
 	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
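The JSON mapping described above can be observed with the protojson package, which this bump also vendors (see the modules.txt hunk later in this patch). A minimal sketch, assuming the vendored packages only; anyMsg and b are illustrative names:

package main

import (
	"fmt"
	"log"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	anyMsg, err := anypb.New(durationpb.New(1212 * time.Millisecond))
	if err != nil {
		log.Fatal(err)
	}
	// For a well-known type such as Duration, protojson emits the @type
	// discriminator plus the message's custom JSON form, e.g.:
	//   {"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.212s"}
	b, err := protojson.Marshal(anyMsg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}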
diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index a583ca2f6..df709a8dd 100644
--- a/prometheus-to-sd/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -35,8 +35,7 @@
 //
 // The Duration message represents a signed span of time.
 //
-//
-// Conversion to a Go Duration
+// # Conversion to a Go Duration
 //
 // The AsDuration method can be used to convert a Duration message to a
 // standard Go time.Duration value:
@@ -65,15 +64,13 @@
 // the resulting value to the closest representable value (e.g., math.MaxInt64
 // for positive overflow and math.MinInt64 for negative overflow).
 //
-//
-// Conversion from a Go Duration
+// # Conversion from a Go Duration
 //
 // The durationpb.New function can be used to construct a Duration message
 // from a standard Go time.Duration value:
 //
 //	dur := durationpb.New(d)
 //	... // make use of d as a *durationpb.Duration
-//
 package durationpb
 
 import (
@@ -96,43 +93,43 @@
 //
 // Example 1: Compute Duration from two Timestamps in pseudo code.
 //
-//     Timestamp start = ...;
-//     Timestamp end = ...;
-//     Duration duration = ...;
+//	Timestamp start = ...;
+//	Timestamp end = ...;
+//	Duration duration = ...;
 //
-//     duration.seconds = end.seconds - start.seconds;
-//     duration.nanos = end.nanos - start.nanos;
+//	duration.seconds = end.seconds - start.seconds;
+//	duration.nanos = end.nanos - start.nanos;
 //
-//     if (duration.seconds < 0 && duration.nanos > 0) {
-//       duration.seconds += 1;
-//       duration.nanos -= 1000000000;
-//     } else if (duration.seconds > 0 && duration.nanos < 0) {
-//       duration.seconds -= 1;
-//       duration.nanos += 1000000000;
-//     }
+//	if (duration.seconds < 0 && duration.nanos > 0) {
+//	  duration.seconds += 1;
+//	  duration.nanos -= 1000000000;
+//	} else if (duration.seconds > 0 && duration.nanos < 0) {
+//	  duration.seconds -= 1;
+//	  duration.nanos += 1000000000;
+//	}
 //
 // Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
 //
-//     Timestamp start = ...;
-//     Duration duration = ...;
-//     Timestamp end = ...;
+//	Timestamp start = ...;
+//	Duration duration = ...;
+//	Timestamp end = ...;
 //
-//     end.seconds = start.seconds + duration.seconds;
-//     end.nanos = start.nanos + duration.nanos;
+//	end.seconds = start.seconds + duration.seconds;
+//	end.nanos = start.nanos + duration.nanos;
 //
-//     if (end.nanos < 0) {
-//       end.seconds -= 1;
-//       end.nanos += 1000000000;
-//     } else if (end.nanos >= 1000000000) {
-//       end.seconds += 1;
-//       end.nanos -= 1000000000;
-//     }
+//	if (end.nanos < 0) {
+//	  end.seconds -= 1;
+//	  end.nanos += 1000000000;
+//	} else if (end.nanos >= 1000000000) {
+//	  end.seconds += 1;
+//	  end.nanos -= 1000000000;
+//	}
 //
 // Example 3: Compute Duration from datetime.timedelta in Python.
 //
-//     td = datetime.timedelta(days=3, minutes=10)
-//     duration = Duration()
-//     duration.FromTimedelta(td)
+//	td = datetime.timedelta(days=3, minutes=10)
+//	duration = Duration()
+//	duration.FromTimedelta(td)
 //
 // # JSON Mapping
 //
@@ -143,8 +140,6 @@
 // encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
 // be expressed in JSON format as "3.000000001s", and 3 seconds and 1
 // microsecond should be expressed in JSON format as "3.000001s".
-//
-//
 type Duration struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
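For reviewers, the round trip documented above (Conversion to/from a Go Duration) in runnable form. A minimal sketch assuming only the vendored durationpb package; d and msg are illustrative names:

package main

import (
	"fmt"
	"log"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Conversion from a Go Duration: 3.000001s, matching the JSON
	// mapping example in the comment above.
	d := 3*time.Second + time.Microsecond
	msg := durationpb.New(d)

	// CheckValid reports an error if seconds/nanos are out of range
	// or have inconsistent signs.
	if err := msg.CheckValid(); err != nil {
		log.Fatal(err)
	}

	// Conversion to a Go Duration: exact here, since the value fits
	// in int64 nanoseconds.
	fmt.Println(msg.AsDuration() == d) // true
}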
diff --git a/prometheus-to-sd/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/prometheus-to-sd/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index c9ae92132..61f69fc11 100644
--- a/prometheus-to-sd/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/prometheus-to-sd/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -36,8 +35,7 @@
 // The Timestamp message represents a timestamp,
 // an instant in time since the Unix epoch (January 1st, 1970).
 //
-//
-// Conversion to a Go Time
+// # Conversion to a Go Time
 //
 // The AsTime method can be used to convert a Timestamp message to a
 // standard Go time.Time value in UTC:
@@ -59,8 +58,7 @@
 //	... // handle error
 //	}
 //
-//
-// Conversion from a Go Time
+// # Conversion from a Go Time
 //
 // The timestamppb.New function can be used to construct a Timestamp message
 // from a standard Go time.Time value:
@@ -72,7 +70,6 @@
 //
 //	ts := timestamppb.Now()
 //	... // make use of ts as a *timestamppb.Timestamp
-//
 package timestamppb
 
 import (
@@ -101,52 +98,50 @@
 //
 // Example 1: Compute Timestamp from POSIX `time()`.
 //
-//     Timestamp timestamp;
-//     timestamp.set_seconds(time(NULL));
-//     timestamp.set_nanos(0);
+//	Timestamp timestamp;
+//	timestamp.set_seconds(time(NULL));
+//	timestamp.set_nanos(0);
 //
 // Example 2: Compute Timestamp from POSIX `gettimeofday()`.
 //
-//     struct timeval tv;
-//     gettimeofday(&tv, NULL);
+//	struct timeval tv;
+//	gettimeofday(&tv, NULL);
 //
-//     Timestamp timestamp;
-//     timestamp.set_seconds(tv.tv_sec);
-//     timestamp.set_nanos(tv.tv_usec * 1000);
+//	Timestamp timestamp;
+//	timestamp.set_seconds(tv.tv_sec);
+//	timestamp.set_nanos(tv.tv_usec * 1000);
 //
 // Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
 //
-//     FILETIME ft;
-//     GetSystemTimeAsFileTime(&ft);
-//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//	FILETIME ft;
+//	GetSystemTimeAsFileTime(&ft);
+//	UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
 //
-//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
-//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
-//     Timestamp timestamp;
-//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
-//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//	// A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+//	// is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+//	Timestamp timestamp;
+//	timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+//	timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
 //
 // Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
 //
-//     long millis = System.currentTimeMillis();
-//
-//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
-//         .setNanos((int) ((millis % 1000) * 1000000)).build();
+//	long millis = System.currentTimeMillis();
 //
+//	Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+//	    .setNanos((int) ((millis % 1000) * 1000000)).build();
 //
 // Example 5: Compute Timestamp from Java `Instant.now()`.
 //
-//     Instant now = Instant.now();
-//
-//     Timestamp timestamp =
-//         Timestamp.newBuilder().setSeconds(now.getEpochSecond())
-//             .setNanos(now.getNano()).build();
+//	Instant now = Instant.now();
 //
+//	Timestamp timestamp =
+//	    Timestamp.newBuilder().setSeconds(now.getEpochSecond())
+//	        .setNanos(now.getNano()).build();
 //
 // Example 6: Compute Timestamp from current time in Python.
 //
-//     timestamp = Timestamp()
-//     timestamp.GetCurrentTime()
+//	timestamp = Timestamp()
+//	timestamp.GetCurrentTime()
 //
 // # JSON Mapping
 //
@@ -174,8 +169,6 @@
 // the Joda Time's [`ISODateTimeFormat.dateTime()`](
 // http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
 // ) to obtain a formatter capable of generating timestamps in this format.
-//
-//
 type Timestamp struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
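The Go-specific conversions documented above (Conversion to/from a Go Time), again as a minimal runnable sketch assuming only the vendored timestamppb package; t and ts are illustrative names:

package main

import (
	"fmt"
	"log"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Conversion from a Go Time; timestamppb.Now() is shorthand for
	// timestamppb.New(time.Now()).
	t := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	ts := timestamppb.New(t)

	// CheckValid reports an error for out-of-range seconds/nanos.
	if err := ts.CheckValid(); err != nil {
		log.Fatal(err)
	}

	// Conversion to a Go Time: AsTime always returns a UTC time.Time.
	fmt.Println(ts.AsTime().Equal(t)) // true
}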
diff --git a/prometheus-to-sd/vendor/modules.txt b/prometheus-to-sd/vendor/modules.txt
index 09ea67737..c854c1fdd 100644
--- a/prometheus-to-sd/vendor/modules.txt
+++ b/prometheus-to-sd/vendor/modules.txt
@@ -1,10 +1,13 @@
-# cloud.google.com/go v0.65.0
-## explicit; go 1.11
+# cloud.google.com/go/compute v1.19.1
+## explicit; go 1.19
+cloud.google.com/go/compute/internal
+# cloud.google.com/go/compute/metadata v0.2.3
+## explicit; go 1.19
 cloud.google.com/go/compute/metadata
 # github.com/beorn7/perks v1.0.1
 ## explicit; go 1.11
 github.com/beorn7/perks/quantile
-# github.com/cespare/xxhash/v2 v2.1.1
+# github.com/cespare/xxhash/v2 v2.2.0
 ## explicit; go 1.11
 github.com/cespare/xxhash/v2
 # github.com/davecgh/go-spew v1.1.1
@@ -31,14 +34,17 @@ github.com/go-openapi/swag
 ## explicit; go 1.15
 github.com/gogo/protobuf/proto
 github.com/gogo/protobuf/sortkeys
-# github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
-## explicit
+# github.com/golang/glog v1.1.0
+## explicit; go 1.18
 github.com/golang/glog
+github.com/golang/glog/internal/logsink
+github.com/golang/glog/internal/stackdump
 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
 ## explicit
 github.com/golang/groupcache/lru
-# github.com/golang/protobuf v1.5.2
+# github.com/golang/protobuf v1.5.3
 ## explicit; go 1.9
+github.com/golang/protobuf/jsonpb
 github.com/golang/protobuf/proto
 github.com/golang/protobuf/ptypes
 github.com/golang/protobuf/ptypes/any
@@ -61,9 +67,19 @@ github.com/google/go-cmp/cmp/internal/value
 # github.com/google/gofuzz v1.1.0
 ## explicit; go 1.12
 github.com/google/gofuzz
-# github.com/googleapis/gax-go/v2 v2.0.5
+# github.com/google/uuid v1.3.0
 ## explicit
+github.com/google/uuid
+# github.com/googleapis/enterprise-certificate-proxy v0.2.3
+## explicit; go 1.19
+github.com/googleapis/enterprise-certificate-proxy/client
+github.com/googleapis/enterprise-certificate-proxy/client/util
+# github.com/googleapis/gax-go/v2 v2.7.1
+## explicit; go 1.19
 github.com/googleapis/gax-go/v2
+github.com/googleapis/gax-go/v2/apierror
+github.com/googleapis/gax-go/v2/apierror/internal/proto
+github.com/googleapis/gax-go/v2/internal
 # github.com/josharian/intern v1.0.0
 ## explicit; go 1.5
 github.com/josharian/intern
@@ -108,10 +124,10 @@ github.com/prometheus/common/model
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
-# github.com/stretchr/testify v1.8.0
+# github.com/stretchr/testify v1.8.1
 ## explicit; go 1.13
 github.com/stretchr/testify/assert
-# go.opencensus.io v0.22.4
+# go.opencensus.io v0.24.0
 ## explicit; go 1.13
 go.opencensus.io
 go.opencensus.io/internal
@@ -129,18 +145,17 @@ go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/propagation
 go.opencensus.io/trace/tracestate
-# golang.org/x/net v0.8.0
+# golang.org/x/net v0.9.0
 ## explicit; go 1.17
 golang.org/x/net/context
-golang.org/x/net/context/ctxhttp
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
 golang.org/x/net/http2/hpack
 golang.org/x/net/idna
 golang.org/x/net/internal/timeseries
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b
-## explicit; go 1.11
+# golang.org/x/oauth2 v0.7.0
+## explicit; go 1.17
 golang.org/x/oauth2
 golang.org/x/oauth2/authhandler
 golang.org/x/oauth2/google
@@ -148,16 +163,16 @@ golang.org/x/oauth2/google/internal/externalaccount
 golang.org/x/oauth2/internal
 golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
-# golang.org/x/sys v0.6.0
+# golang.org/x/sys v0.7.0
 ## explicit; go 1.17
 golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-# golang.org/x/term v0.6.0
+# golang.org/x/term v0.7.0
 ## explicit; go 1.17
 golang.org/x/term
-# golang.org/x/text v0.8.0
+# golang.org/x/text v0.9.0
 ## explicit; go 1.17
 golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
@@ -166,17 +181,18 @@ golang.org/x/text/unicode/norm
 # golang.org/x/time v0.0.0-20220210224613-90d013bbcef8
 ## explicit
 golang.org/x/time/rate
-# google.golang.org/api v0.30.0
-## explicit; go 1.11
+# google.golang.org/api v0.114.0
+## explicit; go 1.19
 google.golang.org/api/googleapi
 google.golang.org/api/googleapi/transport
 google.golang.org/api/internal
+google.golang.org/api/internal/cert
 google.golang.org/api/internal/gensupport
+google.golang.org/api/internal/impersonate
 google.golang.org/api/internal/third_party/uritemplates
 google.golang.org/api/monitoring/v3
 google.golang.org/api/option
 google.golang.org/api/option/internaloption
-google.golang.org/api/transport/cert
 google.golang.org/api/transport/http
 google.golang.org/api/transport/http/internal/propagation
 # google.golang.org/appengine v1.6.7
@@ -191,11 +207,13 @@ google.golang.org/appengine/internal/modules
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154
-## explicit; go 1.11
+# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
+## explicit; go 1.19
+google.golang.org/genproto/googleapis/rpc/code
+google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.31.0
-## explicit; go 1.11
+# google.golang.org/grpc v1.56.3
+## explicit; go 1.17
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
@@ -204,15 +222,17 @@ google.golang.org/grpc/balancer/base
 google.golang.org/grpc/balancer/grpclb/state
 google.golang.org/grpc/balancer/roundrobin
 google.golang.org/grpc/binarylog/grpc_binarylog_v1
+google.golang.org/grpc/channelz
 google.golang.org/grpc/codes
 google.golang.org/grpc/connectivity
 google.golang.org/grpc/credentials
-google.golang.org/grpc/credentials/internal
+google.golang.org/grpc/credentials/insecure
 google.golang.org/grpc/encoding
 google.golang.org/grpc/encoding/proto
 google.golang.org/grpc/grpclog
 google.golang.org/grpc/internal
 google.golang.org/grpc/internal/backoff
+google.golang.org/grpc/internal/balancer/gracefulswitch
 google.golang.org/grpc/internal/balancerload
 google.golang.org/grpc/internal/binarylog
 google.golang.org/grpc/internal/buffer
@@ -223,12 +243,17 @@ google.golang.org/grpc/internal/grpclog
 google.golang.org/grpc/internal/grpcrand
 google.golang.org/grpc/internal/grpcsync
 google.golang.org/grpc/internal/grpcutil
+google.golang.org/grpc/internal/metadata
+google.golang.org/grpc/internal/pretty
+google.golang.org/grpc/internal/resolver
 google.golang.org/grpc/internal/resolver/dns
 google.golang.org/grpc/internal/resolver/passthrough
+google.golang.org/grpc/internal/resolver/unix
 google.golang.org/grpc/internal/serviceconfig
 google.golang.org/grpc/internal/status
 google.golang.org/grpc/internal/syscall
 google.golang.org/grpc/internal/transport
+google.golang.org/grpc/internal/transport/networktype
 google.golang.org/grpc/keepalive
 google.golang.org/grpc/metadata
 google.golang.org/grpc/peer
@@ -237,14 +262,16 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.28.1
+# google.golang.org/protobuf v1.30.0
 ## explicit; go 1.11
+google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire
 google.golang.org/protobuf/internal/descfmt
 google.golang.org/protobuf/internal/descopts
 google.golang.org/protobuf/internal/detrand
 google.golang.org/protobuf/internal/encoding/defval
+google.golang.org/protobuf/internal/encoding/json
 google.golang.org/protobuf/internal/encoding/messageset
 google.golang.org/protobuf/internal/encoding/tag
 google.golang.org/protobuf/internal/encoding/text