From 90b0055bc3f241101307963ab173ae162421784f Mon Sep 17 00:00:00 2001 From: Daniel J Walsh Date: Wed, 5 Apr 2023 07:11:27 -0400 Subject: [PATCH] Update vendor of containers/image Signed-off-by: Daniel J Walsh --- go.mod | 26 +- go.sum | 73 +- .../asaskevich/govalidator/validator.go | 33 +- .../containers/image/v5/copy/blob.go | 22 +- .../containers/image/v5/copy/compression.go | 54 +- .../containers/image/v5/copy/copy.go | 1093 +----------- .../containers/image/v5/copy/encryption.go | 23 +- .../containers/image/v5/copy/manifest.go | 25 +- .../containers/image/v5/copy/multiple.go | 198 +++ .../containers/image/v5/copy/progress_bars.go | 6 +- .../containers/image/v5/copy/single.go | 818 +++++++++ .../image/v5/directory/directory_dest.go | 32 +- .../image/v5/directory/directory_src.go | 2 +- .../image/v5/directory/directory_transport.go | 2 +- .../v5/directory/explicitfilepath/path.go | 2 +- .../containers/image/v5/docker/archive/src.go | 4 +- .../image/v5/docker/archive/transport.go | 17 +- .../image/v5/docker/archive/writer.go | 2 +- .../containers/image/v5/docker/body_reader.go | 4 +- .../image/v5/docker/daemon/daemon_dest.go | 1 + .../image/v5/docker/daemon/daemon_src.go | 2 + .../v5/docker/daemon/daemon_transport.go | 2 +- .../image/v5/docker/docker_client.go | 22 +- .../image/v5/docker/docker_image.go | 6 +- .../image/v5/docker/docker_image_dest.go | 85 +- .../image/v5/docker/docker_image_src.go | 12 +- .../image/v5/docker/docker_transport.go | 2 +- .../containers/image/v5/docker/errors.go | 2 +- .../image/v5/docker/internal/tarfile/dest.go | 28 +- .../v5/docker/internal/tarfile/writer.go | 40 +- .../image/v5/docker/reference/normalize.go | 2 +- .../image/v5/docker/reference/reference.go | 6 +- .../image/v5/docker/reference/regexp.go | 3 +- .../image/v5/docker/registries_d.go | 16 +- .../image/v5/docker/wwwauthenticate.go | 2 +- .../image/v5/internal/image/docker_list.go | 2 +- .../image/v5/internal/image/docker_schema2.go | 2 +- .../image/v5/internal/image/oci_index.go | 2 +- .../image/v5/internal/image/sourced.go | 2 +- .../internal/imagedestination/impl/compat.go | 27 +- .../stubs/put_blob_partial.go | 4 +- .../v5/internal/imagedestination/wrapper.go | 30 +- .../image/v5/internal/manifest/common.go | 72 + .../v5/internal/manifest/docker_schema2.go | 15 + .../internal/manifest/docker_schema2_list.go | 260 +++ .../image/v5/internal/manifest/errors.go | 8 + .../image/v5/internal/manifest/list.go | 90 + .../image/v5/internal/manifest/manifest.go | 167 ++ .../image/v5/internal/manifest/oci_index.go | 329 ++++ .../internal/pkg/platform/platform_matcher.go | 11 +- .../image/v5/internal/private/private.go | 31 +- .../containers/image/v5/internal/set/set.go | 46 + .../image/v5/internal/signature/signature.go | 17 +- .../image/v5/internal/signature/sigstore.go | 27 +- .../image/v5/internal/signature/simple.go | 8 +- .../v5/internal/uploadreader/upload_reader.go | 2 +- .../containers/image/v5/manifest/common.go | 91 - .../image/v5/manifest/docker_schema1.go | 23 +- .../image/v5/manifest/docker_schema2.go | 16 +- .../image/v5/manifest/docker_schema2_list.go | 204 +-- .../containers/image/v5/manifest/list.go | 55 +- .../containers/image/v5/manifest/manifest.go | 141 +- .../containers/image/v5/manifest/oci.go | 23 +- .../containers/image/v5/manifest/oci_index.go | 217 +-- .../image/v5/oci/archive/oci_dest.go | 12 +- .../image/v5/oci/internal/oci_util.go | 8 +- .../image/v5/oci/layout/oci_dest.go | 40 +- .../containers/image/v5/oci/layout/oci_src.go | 3 +- .../image/v5/oci/layout/oci_transport.go 
| 2 +- .../image/v5/openshift/openshift-copies.go | 127 +- .../image/v5/openshift/openshift.go | 6 +- .../image/v5/openshift/openshift_dest.go | 29 +- .../image/v5/openshift/openshift_transport.go | 2 +- .../containers/image/v5/ostree/ostree_dest.go | 30 +- .../image/v5/ostree/ostree_transport.go | 17 +- .../v5/pkg/blobinfocache/memory/memory.go | 22 +- .../image/v5/pkg/compression/compression.go | 2 +- .../v5/pkg/compression/internal/types.go | 6 +- .../image/v5/pkg/docker/config/config.go | 44 +- .../v5/pkg/sysregistriesv2/shortnames.go | 5 +- .../sysregistriesv2/system_registries_v2.go | 8 +- .../v5/pkg/tlsclientconfig/tlsclientconfig.go | 15 +- .../containers/image/v5/sif/transport.go | 2 +- .../containers/image/v5/signature/docker.go | 22 +- .../image/v5/signature/fulcio_cert.go | 88 +- .../image/v5/signature/internal/json.go | 20 +- .../image/v5/signature/internal/rekor_set.go | 12 +- .../v5/signature/internal/sigstore_payload.go | 60 +- .../image/v5/signature/policy_config.go | 26 +- .../v5/signature/policy_config_sigstore.go | 4 +- .../image/v5/signature/policy_eval.go | 2 +- .../v5/signature/policy_eval_signedby.go | 7 +- .../image/v5/signature/sigstore/copied.go | 14 +- .../containers/image/v5/signature/simple.go | 74 +- .../image/v5/storage/storage_dest.go | 275 +-- .../image/v5/storage/storage_image.go | 2 +- .../image/v5/storage/storage_reference.go | 33 +- .../image/v5/storage/storage_src.go | 6 +- .../image/v5/storage/storage_transport.go | 23 +- .../image/v5/tarball/tarball_reference.go | 8 +- .../image/v5/tarball/tarball_src.go | 37 +- .../transports/alltransports/alltransports.go | 16 +- .../image/v5/transports/transports.go | 10 +- .../containers/image/v5/types/types.go | 8 +- .../containers/image/v5/version/version.go | 4 +- .../go-openapi/runtime/client_request.go | 3 +- vendor/github.com/go-openapi/strfmt/bson.go | 2 +- vendor/github.com/go-openapi/strfmt/time.go | 14 +- .../github.com/go-openapi/validate/values.go | 8 +- .../fulcio/pkg/certificate/extensions.go | 305 +++- .../generated/models/intoto_v001_schema.go | 4 +- .../generated/models/intoto_v002_schema.go | 8 +- .../pkg/generated/models/search_index.go | 4 +- .../vbauerster/mpb/v7/cwriter/writer.go | 91 - .../vbauerster/mpb/v7/cwriter/writer_posix.go | 26 - .../mpb/v7/decor/optimistic_string_writer.go | 10 - .../github.com/vbauerster/mpb/v7/progress.go | 424 ----- .../vbauerster/mpb/v7/proxyreader.go | 79 - .../vbauerster/mpb/{v7 => v8}/.gitignore | 0 .../vbauerster/mpb/{v7 => v8}/README.md | 13 +- .../vbauerster/mpb/{v7 => v8}/UNLICENSE | 0 .../vbauerster/mpb/{v7 => v8}/bar.go | 485 +++--- .../vbauerster/mpb/{v7 => v8}/bar_filler.go | 21 +- .../mpb/{v7 => v8}/bar_filler_bar.go | 56 +- .../mpb/{v7 => v8}/bar_filler_nop.go | 6 +- .../mpb/{v7 => v8}/bar_filler_spinner.go | 15 +- .../vbauerster/mpb/{v7 => v8}/bar_option.go | 128 +- .../mpb/{v7 => v8}/container_option.go | 67 +- .../vbauerster/mpb/{v7 => v8}/cwriter/doc.go | 0 .../mpb/{v7 => v8}/cwriter/util_bsd.go | 2 +- .../mpb/{v7 => v8}/cwriter/util_linux.go | 2 +- .../mpb/{v7 => v8}/cwriter/util_solaris.go | 2 +- .../mpb/{v7 => v8}/cwriter/util_zos.go | 2 +- .../vbauerster/mpb/v8/cwriter/writer.go | 59 + .../vbauerster/mpb/v8/cwriter/writer_posix.go | 48 + .../mpb/{v7 => v8}/cwriter/writer_windows.go | 40 +- .../vbauerster/mpb/{v7 => v8}/decor/any.go | 3 +- .../mpb/{v7 => v8}/decor/counters.go | 4 - .../mpb/{v7 => v8}/decor/decorator.go | 32 +- .../vbauerster/mpb/{v7 => v8}/decor/doc.go | 10 +- .../mpb/{v7 => v8}/decor/elapsed.go | 2 - .../vbauerster/mpb/{v7 
=> v8}/decor/eta.go | 17 +- .../vbauerster/mpb/{v7 => v8}/decor/merge.go | 39 +- .../mpb/{v7 => v8}/decor/moving_average.go | 6 + .../vbauerster/mpb/{v7 => v8}/decor/name.go | 1 - .../mpb/{v7 => v8}/decor/on_abort.go | 8 +- .../mpb/{v7 => v8}/decor/on_complete.go | 8 +- .../mpb/{v7 => v8}/decor/on_condition.go | 4 - .../mpb/{v7 => v8}/decor/percentage.go | 16 +- .../vbauerster/mpb/v8/decor/pool.go | 10 + .../mpb/{v7 => v8}/decor/size_type.go | 24 +- .../mpb/{v7 => v8}/decor/sizeb1000_string.go | 0 .../mpb/{v7 => v8}/decor/sizeb1024_string.go | 0 .../vbauerster/mpb/{v7 => v8}/decor/speed.go | 25 +- .../mpb/{v7 => v8}/decor/spinner.go | 0 .../vbauerster/mpb/{v7 => v8}/doc.go | 0 .../vbauerster/mpb/v8/heap_manager.go | 171 ++ .../mpb/{v7 => v8}/internal/percentage.go | 0 .../mpb/{v7 => v8}/internal/width.go | 0 .../mpb/{v7 => v8}/priority_queue.go | 7 +- .../github.com/vbauerster/mpb/v8/progress.go | 451 +++++ .../vbauerster/mpb/v8/proxyreader.go | 96 + .../vbauerster/mpb/v8/proxywriter.go | 96 + .../mongo-driver/x/bsonx/bsoncore/value.go | 2 +- vendor/golang.org/x/exp/LICENSE | 27 + vendor/golang.org/x/exp/PATENTS | 22 + .../x/exp/constraints/constraints.go | 50 + vendor/golang.org/x/exp/maps/maps.go | 94 + vendor/golang.org/x/exp/slices/slices.go | 258 +++ vendor/golang.org/x/exp/slices/sort.go | 126 ++ vendor/golang.org/x/exp/slices/zsortfunc.go | 479 +++++ .../golang.org/x/exp/slices/zsortordered.go | 481 +++++ vendor/google.golang.org/grpc/CONTRIBUTING.md | 29 +- .../grpc_binarylog_v1/binarylog.pb.go | 2 +- vendor/google.golang.org/grpc/clientconn.go | 23 +- .../grpc/codes/code_string.go | 51 +- vendor/google.golang.org/grpc/dialoptions.go | 17 +- .../grpc/internal/binarylog/binarylog.go | 8 +- .../grpc/internal/binarylog/method_logger.go | 5 +- .../grpc/internal/grpclog/prefixLogger.go | 12 + .../grpc/internal/internal.go | 10 + .../grpc/internal/metadata/metadata.go | 62 +- .../grpc/internal/transport/controlbuf.go | 82 +- .../grpc/internal/transport/http2_client.go | 21 +- .../grpc/internal/transport/http2_server.go | 27 +- .../grpc/internal/transport/http_util.go | 24 +- .../grpc/internal/transport/transport.go | 23 +- .../grpc/metadata/metadata.go | 13 +- .../grpc/resolver/resolver.go | 20 +- vendor/google.golang.org/grpc/rpc_util.go | 53 +- vendor/google.golang.org/grpc/server.go | 151 +- vendor/google.golang.org/grpc/stats/stats.go | 22 +- vendor/google.golang.org/grpc/stream.go | 61 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 15 +- .../protobuf/encoding/protojson/doc.go | 2 +- .../encoding/protojson/well_known_types.go | 12 +- .../protobuf/encoding/protowire/wire.go | 8 +- .../protobuf/internal/encoding/json/decode.go | 2 +- .../protobuf/internal/encoding/text/decode.go | 5 +- .../internal/encoding/text/decode_number.go | 43 +- .../protobuf/internal/genid/descriptor_gen.go | 90 +- .../protobuf/internal/impl/convert.go | 1 - .../protobuf/internal/strs/strings_unsafe.go | 2 +- .../protobuf/internal/version/version.go | 4 +- .../google.golang.org/protobuf/proto/doc.go | 9 +- .../google.golang.org/protobuf/proto/equal.go | 172 +- .../reflect/protoreflect/source_gen.go | 14 + .../protobuf/reflect/protoreflect/value.go | 2 +- .../reflect/protoreflect/value_equal.go | 168 ++ .../reflect/protoreflect/value_union.go | 4 +- .../reflect/protoregistry/registry.go | 2 +- .../types/descriptorpb/descriptor.pb.go | 1547 +++++++++++------ .../protobuf/types/known/anypb/any.pb.go | 135 +- .../types/known/durationpb/duration.pb.go | 63 +- 
.../types/known/timestamppb/timestamp.pb.go | 61 +- vendor/modules.txt | 50 +- 217 files changed, 8474 insertions(+), 4868 deletions(-) create mode 100644 vendor/github.com/containers/image/v5/copy/multiple.go create mode 100644 vendor/github.com/containers/image/v5/copy/single.go create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/common.go create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/list.go create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/manifest.go create mode 100644 vendor/github.com/containers/image/v5/internal/manifest/oci_index.go create mode 100644 vendor/github.com/containers/image/v5/internal/set/set.go delete mode 100644 vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go delete mode 100644 vendor/github.com/vbauerster/mpb/v7/cwriter/writer_posix.go delete mode 100644 vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go delete mode 100644 vendor/github.com/vbauerster/mpb/v7/progress.go delete mode 100644 vendor/github.com/vbauerster/mpb/v7/proxyreader.go rename vendor/github.com/vbauerster/mpb/{v7 => v8}/.gitignore (100%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/README.md (89%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/UNLICENSE (100%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/bar.go (53%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/bar_filler.go (52%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/bar_filler_bar.go (85%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/bar_filler_nop.go (60%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/bar_filler_spinner.go (85%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/bar_option.go (59%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/container_option.go (60%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/cwriter/doc.go (100%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/cwriter/util_bsd.go (58%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/cwriter/util_linux.go (78%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/cwriter/util_solaris.go (82%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/cwriter/util_zos.go (85%) create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/writer.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/cwriter/writer_posix.go rename vendor/github.com/vbauerster/mpb/{v7 => v8}/cwriter/writer_windows.go (70%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/any.go (92%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/counters.go (99%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/decorator.go (88%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/doc.go (75%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/elapsed.go (99%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/eta.go (94%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/merge.go (70%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/moving_average.go (90%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/name.go (98%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/on_abort.go (86%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/on_complete.go (84%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/on_condition.go (98%) rename vendor/github.com/vbauerster/mpb/{v7 => 
v8}/decor/percentage.go (78%) create mode 100644 vendor/github.com/vbauerster/mpb/v8/decor/pool.go rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/size_type.go (77%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/sizeb1000_string.go (100%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/sizeb1024_string.go (100%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/speed.go (90%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/decor/spinner.go (100%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/doc.go (100%) create mode 100644 vendor/github.com/vbauerster/mpb/v8/heap_manager.go rename vendor/github.com/vbauerster/mpb/{v7 => v8}/internal/percentage.go (100%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/internal/width.go (100%) rename vendor/github.com/vbauerster/mpb/{v7 => v8}/priority_queue.go (84%) create mode 100644 vendor/github.com/vbauerster/mpb/v8/progress.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/proxyreader.go create mode 100644 vendor/github.com/vbauerster/mpb/v8/proxywriter.go create mode 100644 vendor/golang.org/x/exp/LICENSE create mode 100644 vendor/golang.org/x/exp/PATENTS create mode 100644 vendor/golang.org/x/exp/constraints/constraints.go create mode 100644 vendor/golang.org/x/exp/maps/maps.go create mode 100644 vendor/golang.org/x/exp/slices/slices.go create mode 100644 vendor/golang.org/x/exp/slices/sort.go create mode 100644 vendor/golang.org/x/exp/slices/zsortfunc.go create mode 100644 vendor/golang.org/x/exp/slices/zsortordered.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go diff --git a/go.mod b/go.mod index fdd80e56f..bda94647e 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/containerd/containerd v1.7.0 github.com/containernetworking/cni v1.1.2 github.com/containernetworking/plugins v1.2.0 - github.com/containers/image/v5 v5.24.2 + github.com/containers/image/v5 v5.25.0 github.com/containers/ocicrypt v1.1.7 github.com/containers/storage v1.46.0 github.com/coreos/go-systemd/v22 v22.5.0 @@ -53,7 +53,7 @@ require ( github.com/Microsoft/hcsshim v0.10.0-rc.7 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect - github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/chzyer/readline v1.5.1 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect @@ -67,11 +67,11 @@ require ( github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/loads v0.21.2 // indirect - github.com/go-openapi/runtime v0.24.1 // indirect - github.com/go-openapi/spec v0.20.7 // indirect - github.com/go-openapi/strfmt v0.21.5 // indirect + github.com/go-openapi/runtime v0.25.0 // indirect + github.com/go-openapi/spec v0.20.8 // indirect + github.com/go-openapi/strfmt v0.21.7 // indirect github.com/go-openapi/swag v0.22.3 // indirect - github.com/go-openapi/validate v0.22.0 // indirect + github.com/go-openapi/validate v0.22.1 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -105,8 +105,9 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/proglottis/gpgme v0.1.3 
// indirect github.com/rivo/uniseg v0.4.4 // indirect - github.com/sigstore/fulcio v1.1.0 // indirect - github.com/sigstore/rekor v1.0.1 // indirect + github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/sigstore/fulcio v1.2.0 // indirect + github.com/sigstore/rekor v1.1.0 // indirect github.com/sigstore/sigstore v1.6.0 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect github.com/sylabs/sif/v2 v2.11.1 // indirect @@ -115,18 +116,19 @@ require ( github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/ulikunitz/xz v0.5.11 // indirect github.com/vbatts/tar-split v0.11.3 // indirect - github.com/vbauerster/mpb/v7 v7.5.3 // indirect + github.com/vbauerster/mpb/v8 v8.3.0 // indirect github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect - go.mongodb.org/mongo-driver v1.11.1 // indirect + go.mongodb.org/mongo-driver v1.11.3 // indirect go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect go.opencensus.io v0.24.0 // indirect + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/mod v0.9.0 // indirect golang.org/x/net v0.8.0 // indirect golang.org/x/text v0.8.0 // indirect golang.org/x/tools v0.7.0 // indirect google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect - google.golang.org/grpc v1.53.0 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/grpc v1.54.0 // indirect + google.golang.org/protobuf v1.30.0 // indirect gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 5f07494a6..0127c7666 100644 --- a/go.sum +++ b/go.sum @@ -15,8 +15,8 @@ github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAU github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= -github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= @@ -46,8 +46,8 @@ github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl3 github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU= github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= -github.com/containers/image/v5 v5.24.2 h1:QcMsHBAXBPPnVYo6iEFarvaIpym7sBlwsGHPJlucxN0= -github.com/containers/image/v5 v5.24.2/go.mod h1:oss5F6ssGQz8ZtC79oY+fuzYA3m3zBek9tq9gmhuvHc= +github.com/containers/image/v5 
v5.25.0 h1:TJ0unmalbU+scd0i3Txap2wjGsAnv06MSCwgn6bsizk= +github.com/containers/image/v5 v5.25.0/go.mod h1:EKvys0WVlRFkDw26R8y52TuhV9Tfn0yq2luLX6W52Ls= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.1.7 h1:thhNr4fu2ltyGz8aMx8u48Ae0Pnbip3ePP9/mzkZ/3U= @@ -75,7 +75,6 @@ github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryef github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -110,28 +109,25 @@ github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXym github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= -github.com/go-openapi/runtime v0.24.1 h1:Sml5cgQKGYQHF+M7yYSHaH1eOjvTykrddTE/KtQVjqo= -github.com/go-openapi/runtime v0.24.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= +github.com/go-openapi/runtime v0.25.0 h1:7yQTCdRbWhX8vnIjdzU8S00tBYf7Sg71EBeorlPHvhc= +github.com/go-openapi/runtime v0.25.0/go.mod h1:Ux6fikcHXyyob6LNWxtE96hWwjBPYF0DXgVFuMTneOs= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.20.7 h1:1Rlu/ZrOCCob0n+JKKJAWhNWMPW8bOZRg8FJaY+0SKI= -github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU= +github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.5 h1:Z/algjpXIZpbvdN+6KbVTkpO75RuedMrqpn1GN529h4= -github.com/go-openapi/strfmt v0.21.5/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= +github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.3 
h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= -github.com/go-openapi/validate v0.22.0 h1:b0QecH6VslW/TxtpKgzpO1SNG7GU2FsaqKdP1E2T50Y= -github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= +github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= @@ -245,7 +241,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -261,7 +257,6 @@ github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYt github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= @@ -273,13 +268,12 @@ github.com/mistifyio/go-zfs/v3 v3.0.0 h1:J5QK618xRcXnQYZ2GE5FdmpS1ufIrWue+lR/mpe github.com/mistifyio/go-zfs/v3 v3.0.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod 
h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20221120202655-abb19827d345 h1:J9c53/kxIH+2nTKBEfZYFMlhghtHpIHSXpm5VRGHSnU= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -321,7 +315,6 @@ github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go. github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= @@ -338,7 +331,7 @@ github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7G github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= @@ -346,15 +339,17 @@ github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sigstore/fulcio v1.1.0 h1:mzzJ05Ccu8Y2inyioklNvc8MpzlGHxu8YqNeTm0dHfU= -github.com/sigstore/fulcio v1.1.0/go.mod h1:zv1ZQTXZbUwQdRwajlQksc34pRas+2aZYpIZoQBNev8= -github.com/sigstore/rekor v1.0.1 h1:rcESXSNkAPRWFYZel9rarspdvneET60F2ngNkadi89c= 
-github.com/sigstore/rekor v1.0.1/go.mod h1:ecTKdZWGWqE1pl3U1m1JebQJLU/hSjD9vYHOmHQ7w4g= +github.com/sigstore/fulcio v1.2.0 h1:I4H764cDbryKXkPtasUvo8bcix/7xLvkxWYWNp+JtWI= +github.com/sigstore/fulcio v1.2.0/go.mod h1:FS7qpBvOEqs0uEh1+hJxzxtJistWN29ybLtAzFNUi0c= +github.com/sigstore/rekor v1.1.0 h1:9fjPvW0WERE7VPtSSVSTbDLLOsrNx3RtiIeZ4/1tmDI= +github.com/sigstore/rekor v1.1.0/go.mod h1:jEOGDGPMURBt9WR50N0rO7X8GZzLE3UQT+ln6BKJ/m0= github.com/sigstore/sigstore v1.6.0 h1:0fYHVoUlPU3WM8o3U1jT9SI2lqQE68XbG+qWncXaZC8= github.com/sigstore/sigstore v1.6.0/go.mod h1:+55pf6HZ15kf60c08W+GH95JQbAcnVyUBquQGSVdsto= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -404,8 +399,8 @@ github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= -github.com/vbauerster/mpb/v7 v7.5.3 h1:BkGfmb6nMrrBQDFECR/Q7RkKCw7ylMetCb4079CGs4w= -github.com/vbauerster/mpb/v7 v7.5.3/go.mod h1:i+h4QY6lmLvBNK2ah1fSreiw3ajskRlBp9AhY/PnuOE= +github.com/vbauerster/mpb/v8 v8.3.0 h1:xw2eMJ6v5NP8Rd7yOVzU6OqnRPrS1yWAoLTrWe7W4Nc= +github.com/vbauerster/mpb/v8 v8.3.0/go.mod h1:bngtYUAu25QGxcYYglsF6oyoHlC9Yhh582xF9LjfmL4= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= @@ -431,10 +426,9 @@ go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.11.1 h1:QP0znIRTuL0jf1oBQoAoM0C6ZJfBK4kx0Uumtv1A7w8= -go.mongodb.org/mongo-driver v1.11.1/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8= +go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= +go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= @@ -446,13 +440,14 @@ golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaE golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod 
h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -475,7 +470,6 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -520,12 +514,9 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -567,8 +558,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= 
-google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -580,8 +571,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -610,7 +601,7 @@ gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go index 46ecfc84a..c9c4fac06 100644 --- a/vendor/github.com/asaskevich/govalidator/validator.go +++ b/vendor/github.com/asaskevich/govalidator/validator.go @@ -454,27 +454,26 @@ func IsCreditCard(str string) bool { if !rxCreditCard.MatchString(sanitized) { return false } + + number, _ := ToInt(sanitized) + number, lastDigit := number / 10, number % 10 + var sum int64 - var digit string - var tmpNum int64 - var shouldDouble bool - for i := len(sanitized) - 1; i >= 0; i-- { - digit = sanitized[i:(i + 1)] - tmpNum, _ = ToInt(digit) - if shouldDouble { - tmpNum *= 2 - if tmpNum >= 10 { - sum += (tmpNum % 10) + 1 - } else { - sum += tmpNum + for i:=0; number > 0; i++ { + digit := number % 10 + + if i % 2 == 0 { + digit *= 2 + if digit > 9 { + digit -= 9 } - } else { - sum += tmpNum } - shouldDouble = !shouldDouble + + sum += digit + number = number / 10 } - - return sum%10 == 0 + + return (sum + lastDigit) % 10 == 0 } // IsISBN10 checks if the string is an ISBN version 10. 
diff --git a/vendor/github.com/containers/image/v5/copy/blob.go b/vendor/github.com/containers/image/v5/copy/blob.go index cfac3e6d5..96674ddbb 100644 --- a/vendor/github.com/containers/image/v5/copy/blob.go +++ b/vendor/github.com/containers/image/v5/copy/blob.go @@ -104,12 +104,11 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read if !isConfig { options.LayerIndex = &layerIndex } - uploadedInfo, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options) + destBlob, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options) if err != nil { return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err) } - - uploadedInfo.Annotations = stream.info.Annotations + uploadedInfo := updatedBlobInfoFromUpload(stream.info, destBlob) compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations) decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation) @@ -169,3 +168,20 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) { } return n, err } + +// updatedBlobInfoFromUpload returns inputInfo updated with uploadedBlob which was created based on inputInfo. +func updatedBlobInfoFromUpload(inputInfo types.BlobInfo, uploadedBlob private.UploadedBlob) types.BlobInfo { + // The transport is only tasked with dealing with the raw blob, and possibly computing Digest/Size. + // Handling of compression, encryption, and the related MIME types and the like are all the responsibility + // of the generic code in this package. + return types.BlobInfo{ + Digest: uploadedBlob.Digest, + Size: uploadedBlob.Size, + URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. + Annotations: inputInfo.Annotations, + MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression/Crypto. + CompressionOperation: inputInfo.CompressionOperation, // Expected to be unset, and only updated by copyBlobFromStream. + CompressionAlgorithm: inputInfo.CompressionAlgorithm, // Expected to be unset, and only updated by copyBlobFromStream. + CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset, and only updated by copyBlobFromStream. + } +} diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go index 5286975b6..eb4da5092 100644 --- a/vendor/github.com/containers/image/v5/copy/compression.go +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -6,10 +6,30 @@ import ( "io" internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/compression" compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" +) + +var ( + // defaultCompressionFormat is used if the destination transport requests + // compression, and the user does not explicitly instruct us to use an algorithm. 
+ defaultCompressionFormat = &compression.Gzip + + // compressionBufferSize is the buffer size used to compress a blob + compressionBufferSize = 1048576 + + // expectedCompressionFormats is used to check if a blob with a specified media type is compressed + // using the algorithm that the media type says it should be compressed with + expectedCompressionFormats = map[string]*compressiontypes.Algorithm{ + imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip, + imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd, + manifest.DockerV2Schema2LayerMediaType: &compression.Gzip, + } ) // bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step. @@ -109,13 +129,13 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed { logrus.Debugf("Compressing blob on the fly") var uploadedAlgorithm *compressiontypes.Algorithm - if ic.c.compressionFormat != nil { - uploadedAlgorithm = ic.c.compressionFormat + if ic.compressionFormat != nil { + uploadedAlgorithm = ic.compressionFormat } else { uploadedAlgorithm = defaultCompressionFormat } - reader, annotations := ic.c.compressedStream(stream.reader, *uploadedAlgorithm) + reader, annotations := ic.compressedStream(stream.reader, *uploadedAlgorithm) // Note: reader must be closed on all return paths. stream.reader = reader stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? @@ -137,7 +157,7 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp // bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so. func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) { if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed && - ic.c.compressionFormat != nil && ic.c.compressionFormat.Name() != detected.format.Name() { + ic.compressionFormat != nil && ic.compressionFormat.Name() != detected.format.Name() { // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally // re-compressed using the desired format. logrus.Debugf("Blob will be converted") @@ -153,20 +173,20 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp } }() - recompressed, annotations := ic.c.compressedStream(decompressed, *ic.c.compressionFormat) + recompressed, annotations := ic.compressedStream(decompressed, *ic.compressionFormat) // Note: recompressed must be closed on all return paths. stream.reader = recompressed - stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations. 
Digest: "", Size: -1, } succeeded = true return &bpCompressionStepData{ operation: types.PreserveOriginal, - uploadedAlgorithm: ic.c.compressionFormat, + uploadedAlgorithm: ic.compressionFormat, uploadedAnnotations: annotations, srcCompressorName: detected.srcCompressorName, - uploadedCompressorName: ic.c.compressionFormat.Name(), + uploadedCompressorName: ic.compressionFormat.Name(), closers: []io.Closer{decompressed, recompressed}, }, nil } @@ -183,7 +203,7 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp } // Note: s must be closed on all return paths. stream.reader = s - stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? + stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info? Notably the current approach correctly removes zstd:chunked metadata annotations. Digest: "", Size: -1, } @@ -199,7 +219,9 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp } // bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob. -func (ic *imageCopier) bpcPreserveOriginal(stream *sourceStream, detected bpDetectCompressionStepData, +// This does not change the sourceStream parameter; we include it for symmetry with other +// pipeline steps. +func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCompressionStepData, layerCompressionChangeSupported bool) *bpCompressionStepData { logrus.Debugf("Using original blob without modification") // Remember if the original blob was compressed, and if so how, so that if @@ -232,9 +254,7 @@ func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCom if *annotations == nil { *annotations = map[string]string{} } - for k, v := range d.uploadedAnnotations { - (*annotations)[k] = v - } + maps.Copy(*annotations, d.uploadedAnnotations) } // recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo adnd the original srcInfo. @@ -298,24 +318,24 @@ func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, co } // compressGoroutine reads all input from src and writes its compressed equivalent to dest. -func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) { +func (ic *imageCopier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) { err := errors.New("Internal error: unexpected panic in compressGoroutine") defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily. _ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil }() - err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel) + err = doCompression(dest, src, metadata, compressionFormat, ic.compressionLevel) } // compressedStream returns a stream the input reader compressed using format, and a metadata map. // The caller must close the returned reader. // AFTER the stream is consumed, metadata will be updated with annotations to use on the data. 
-func (c *copier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) { +func (ic *imageCopier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) { pipeReader, pipeWriter := io.Pipe() annotations := map[string]string{} // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, // we don’t care. - go c.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter + go ic.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter return pipeReader, annotations } diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index 26521fe09..bf8f4015b 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -1,15 +1,11 @@ package copy import ( - "bytes" "context" "errors" "fmt" "io" "os" - "reflect" - "strings" - "sync" "time" "github.com/containers/image/v5/docker/reference" @@ -17,21 +13,18 @@ import ( "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/internal/imagedestination" "github.com/containers/image/v5/internal/imagesource" - "github.com/containers/image/v5/internal/pkg/platform" + internalManifest "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache" - "github.com/containers/image/v5/pkg/compression" - compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/signature" "github.com/containers/image/v5/signature/signer" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" digest "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "github.com/vbauerster/mpb/v7" + "golang.org/x/exp/slices" "golang.org/x/sync/semaphore" "golang.org/x/term" ) @@ -43,55 +36,8 @@ var ( // maxParallelDownloads is used to limit the maximum number of parallel // downloads. Let's follow Firefox by limiting it to 6. maxParallelDownloads = uint(6) - - // defaultCompressionFormat is used if the destination transport requests - // compression, and the user does not explicitly instruct us to use an algorithm. - defaultCompressionFormat = &compression.Gzip ) -// compressionBufferSize is the buffer size used to compress a blob -var compressionBufferSize = 1048576 - -// expectedCompressionFormats is used to check if a blob with a specified media type is compressed -// using the algorithm that the media type says it should be compressed with -var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{ - imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip, - imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd, - manifest.DockerV2Schema2LayerMediaType: &compression.Gzip, -} - -// copier allows us to keep track of diffID values for blobs, and other -// data shared across one or more images in a possible manifest list. -// The owner must call close() when done. 
-type copier struct { - dest private.ImageDestination - rawSource private.ImageSource - reportWriter io.Writer - progressOutput io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties - blobInfoCache internalblobinfocache.BlobInfoCache2 - compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil. - compressionLevel *int - ociDecryptConfig *encconfig.DecryptConfig - ociEncryptConfig *encconfig.EncryptConfig - concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the amount of concurrently copied blobs - downloadForeignLayers bool - signers []*signer.Signer // Signers to use to create new signatures for the image - signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed. -} - -// imageCopier tracks state specific to a single image (possibly an item of a manifest list) -type imageCopier struct { - c *copier - manifestUpdates *types.ManifestUpdateOptions - src *image.SourcedImage - diffIDsAreNeeded bool - cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can - canSubstituteBlobs bool - ociEncryptLayers *[]int -} - const ( // CopySystemImage is the default value which, when set in // Options.ImageListSelection, indicates that the caller expects only one @@ -143,15 +89,18 @@ type Options struct { // Preserve digests, and fail if we cannot. PreserveDigests bool - // manifest MIME type of image set by user. "" is default and means use the autodetection to the the manifest MIME type + // manifest MIME type of image set by user. "" is default and means use the autodetection to the manifest MIME type ForceManifestMIMEType string ImageListSelection ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list Instances []digest.Digest // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself + // Give priority to pulling gzip images if multiple images are present when configured to OptionalBoolTrue, + // prefers the best compression if this is configured as OptionalBoolFalse. Choose automatically (and the choice may change over time) + // if this is set to OptionalBoolUndefined (which is the default behavior, and recommended for most callers). + // This only affects CopySystemImage. + PreferGzipInstances types.OptionalBool // If OciEncryptConfig is non-nil, it indicates that an image should be encrypted. // The encryption options is derived from the construction of EncryptConfig object. - // Note: During initial encryption process of a layer, the resultant digest is not known - // during creation, so newDigestingReader has to be set with validateDigest = false OciEncryptConfig *encconfig.EncryptConfig // OciEncryptLayers represents the list of layers to encrypt. // If nil, don't encrypt any layers. 
@@ -179,14 +128,23 @@ type Options struct { DownloadForeignLayers bool } -// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value -func validateImageListSelection(selection ImageListSelection) error { - switch selection { - case CopySystemImage, CopyAllImages, CopySpecificImages: - return nil - default: - return fmt.Errorf("Invalid value for options.ImageListSelection: %d", selection) - } +// copier allows us to keep track of diffID values for blobs, and other +// data shared across one or more images in a possible manifest list. +// The owner must call close() when done. +type copier struct { + dest private.ImageDestination + rawSource private.ImageSource + reportWriter io.Writer + progressOutput io.Writer + progressInterval time.Duration + progress chan types.ProgressProperties + blobInfoCache internalblobinfocache.BlobInfoCache2 + ociDecryptConfig *encconfig.DecryptConfig + ociEncryptConfig *encconfig.EncryptConfig + concurrentBlobCopiesSemaphore *semaphore.Weighted // Limits the amount of concurrently copied blobs + downloadForeignLayers bool + signers []*signer.Signer // Signers to use to create new signatures for the image + signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed. } // Image copies image from srcRef to destRef, using policyContext to validate @@ -287,12 +245,6 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, } } - if options.DestinationCtx != nil { - // Note that compressionFormat and compressionLevel can be nil. - c.compressionFormat = options.DestinationCtx.CompressionFormat - c.compressionLevel = options.DestinationCtx.CompressionLevel - } - if err := c.setupSigners(options); err != nil { return nil, err } @@ -305,7 +257,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, if !multiImage { // The simple case: just copy a single image. 
- if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedToplevel, nil); err != nil { + if copiedManifest, _, _, err = c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedToplevel, nil); err != nil { return nil, err } } else if options.ImageListSelection == CopySystemImage { @@ -315,18 +267,18 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, if err != nil { return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err) } - manifestList, err := manifest.ListFromBlob(mfest, manifestType) + manifestList, err := internalManifest.ListFromBlob(mfest, manifestType) if err != nil { return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err) } - instanceDigest, err := manifestList.ChooseInstance(options.SourceCtx) // try to pick one that matches options.SourceCtx + instanceDigest, err := manifestList.ChooseInstanceByCompression(options.SourceCtx, options.PreferGzipInstances) // try to pick one that matches options.SourceCtx if err != nil { return nil, fmt.Errorf("choosing an image from manifest list %s: %w", transports.ImageName(srcRef), err) } logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest) unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest) - if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, nil); err != nil { + if copiedManifest, _, _, err = c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, nil); err != nil { return nil, fmt.Errorf("copying system image from manifest list: %w", err) } } else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages, */ @@ -353,6 +305,15 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, return copiedManifest, nil } +// Printf writes a formatted string to c.reportWriter. +// Note that the method name Printf is not entirely arbitrary: (go tool vet) +// has a built-in list of functions/methods (whatever object they are for) +// which have their format strings checked; for other names we would have +// to pass a parameter to every (go tool vet) invocation. +func (c *copier) Printf(format string, a ...any) { + fmt.Fprintf(c.reportWriter, format, a...) +} + // close tears down state owned by copier. func (c *copier) close() { for i, s := range c.signersToClose { @@ -362,6 +323,16 @@ func (c *copier) close() { } } +// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value +func validateImageListSelection(selection ImageListSelection) error { + switch selection { + case CopySystemImage, CopyAllImages, CopySpecificImages: + return nil + default: + return fmt.Errorf("Invalid value for options.ImageListSelection: %d", selection) + } +} + // Checks if the destination supports accepting multiple images by checking if it can support // manifest types that are lists of other manifests. func supportsMultipleImages(dest types.ImageDestination) bool { @@ -370,518 +341,7 @@ func supportsMultipleImages(dest types.ImageDestination) bool { // Anything goes! 
return true } - for _, mtype := range mtypes { - if manifest.MIMETypeIsMultiImage(mtype) { - return true - } - } - return false -} - -// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the -// (possibly remote) destination). Returning true and the destination's manifest, type and digest if they compare equal. -func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) { - srcManifestDigest, err := manifest.Digest(src.ManifestBlob) - if err != nil { - return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err) - } - - destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx) - if err != nil { - logrus.Debugf("Unable to create destination image %s source: %v", dest.Reference(), err) - return false, nil, "", "", nil - } - - destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance) - if err != nil { - logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err) - return false, nil, "", "", nil - } - - destManifestDigest, err := manifest.Digest(destManifest) - if err != nil { - return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err) - } - - logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest) - if srcManifestDigest != destManifestDigest { - return false, nil, "", "", nil - } - - // Destination and source manifests, types and digests should all be equivalent - return true, destManifest, destManifestType, destManifestDigest, nil -} - -// copyMultipleImages copies some or all of an image list's instances, using -// policyContext to validate source image admissibility. -func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) { - // Parse the list and get a copy of the original value after it's re-encoded. - manifestList, manifestType, err := unparsedToplevel.Manifest(ctx) - if err != nil { - return nil, fmt.Errorf("reading manifest list: %w", err) - } - originalList, err := manifest.ListFromBlob(manifestList, manifestType) - if err != nil { - return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err) - } - updatedList := originalList.Clone() - - sigs, err := c.sourceSignatures(ctx, unparsedToplevel, options, - "Getting image list signatures", - "Checking if image list destination supports signatures") - if err != nil { - return nil, err - } - - // If the destination is a digested reference, make a note of that, determine what digest value we're - // expecting, and check that the source manifest matches it. - destIsDigestedReference := false - if named := c.dest.Reference().DockerReference(); named != nil { - if digested, ok := named.(reference.Digested); ok { - destIsDigestedReference = true - matches, err := manifest.MatchesDigest(manifestList, digested.Digest()) - if err != nil { - return nil, fmt.Errorf("computing digest of source image's manifest: %w", err) - } - if !matches { - return nil, errors.New("Digest of source image's manifest would not match destination reference") - } - } - } - - // Determine if we're allowed to modify the manifest list. - // If we can, set to the empty string. If we can't, set to the reason why. 
- // Compare, and perhaps keep in sync with, the version in copyOneImage. - cannotModifyManifestListReason := "" - if len(sigs) > 0 { - cannotModifyManifestListReason = "Would invalidate signatures" - } - if destIsDigestedReference { - cannotModifyManifestListReason = "Destination specifies a digest" - } - if options.PreserveDigests { - cannotModifyManifestListReason = "Instructed to preserve digests" - } - - // Determine if we'll need to convert the manifest list to a different format. - forceListMIMEType := options.ForceManifestMIMEType - switch forceListMIMEType { - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: - forceListMIMEType = manifest.DockerV2ListMediaType - case imgspecv1.MediaTypeImageManifest: - forceListMIMEType = imgspecv1.MediaTypeImageIndex - } - selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType) - if err != nil { - return nil, fmt.Errorf("determining manifest list type to write to destination: %w", err) - } - if selectedListType != originalList.MIMEType() { - if cannotModifyManifestListReason != "" { - return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason) - } - } - - // Copy each image, or just the ones we want to copy, in turn. - instanceDigests := updatedList.Instances() - imagesToCopy := len(instanceDigests) - if options.ImageListSelection == CopySpecificImages { - imagesToCopy = len(options.Instances) - } - c.Printf("Copying %d of %d images in list\n", imagesToCopy, len(instanceDigests)) - updates := make([]manifest.ListUpdate, len(instanceDigests)) - instancesCopied := 0 - for i, instanceDigest := range instanceDigests { - if options.ImageListSelection == CopySpecificImages { - skip := true - for _, instance := range options.Instances { - if instance == instanceDigest { - skip = false - break - } - } - if skip { - update, err := updatedList.Instance(instanceDigest) - if err != nil { - return nil, err - } - logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) - // Record the digest/size/type of the manifest that we didn't copy. - updates[i] = update - continue - } - } - logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) - c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy) - unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest) - updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest) - if err != nil { - return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", instancesCopied+1, imagesToCopy, err) - } - instancesCopied++ - // Record the result of a possible conversion here. - update := manifest.ListUpdate{ - Digest: updatedManifestDigest, - Size: int64(len(updatedManifest)), - MediaType: updatedManifestType, - } - updates[i] = update - } - - // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made. - if err = updatedList.UpdateInstances(updates); err != nil { - return nil, fmt.Errorf("updating manifest list: %w", err) - } - - // Iterate through supported list types, preferred format first. 
- c.Printf("Writing manifest list to image destination\n") - var errs []string - for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) { - attemptedList := updatedList - - logrus.Debugf("Trying to use manifest list type %s…", thisListType) - - // Perform the list conversion, if we need one. - if thisListType != updatedList.MIMEType() { - attemptedList, err = updatedList.ConvertToMIMEType(thisListType) - if err != nil { - return nil, fmt.Errorf("converting manifest list to list with MIME type %q: %w", thisListType, err) - } - } - - // Check if the updates or a type conversion meaningfully changed the list of images - // by serializing them both so that we can compare them. - attemptedManifestList, err := attemptedList.Serialize() - if err != nil { - return nil, fmt.Errorf("encoding updated manifest list (%q: %#v): %w", updatedList.MIMEType(), updatedList.Instances(), err) - } - originalManifestList, err := originalList.Serialize() - if err != nil { - return nil, fmt.Errorf("encoding original manifest list for comparison (%q: %#v): %w", originalList.MIMEType(), originalList.Instances(), err) - } - - // If we can't just use the original value, but we have to change it, flag an error. - if !bytes.Equal(attemptedManifestList, originalManifestList) { - if cannotModifyManifestListReason != "" { - return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason) - } - logrus.Debugf("Manifest list has been updated") - } else { - // We can just use the original value, so use it instead of the one we just rebuilt, so that we don't change the digest. - attemptedManifestList = manifestList - } - - // Save the manifest list. - err = c.dest.PutManifest(ctx, attemptedManifestList, nil) - if err != nil { - logrus.Debugf("Upload of manifest list type %s failed: %v", thisListType, err) - errs = append(errs, fmt.Sprintf("%s(%v)", thisListType, err)) - continue - } - errs = nil - manifestList = attemptedManifestList - break - } - if errs != nil { - return nil, fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", ")) - } - - // Sign the manifest list. - newSigs, err := c.createSignatures(ctx, manifestList, options.SignIdentity) - if err != nil { - return nil, err - } - sigs = append(sigs, newSigs...) - - c.Printf("Storing list signatures\n") - if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil { - return nil, fmt.Errorf("writing signatures: %w", err) - } - - return manifestList, nil -} - -// copyOneImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate -// source image admissibility. -func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest) (retManifest []byte, retManifestType string, retManifestDigest digest.Digest, retErr error) { - // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list. - // Make sure we fail cleanly in such cases. - multiImage, err := isMultiImage(ctx, unparsedImage) - if err != nil { - // FIXME FIXME: How to name a reference for the sub-image? 
- return nil, "", "", fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err) - } - if multiImage { - return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") - } - - // Please keep this policy check BEFORE reading any other information about the image. - // (The multiImage check above only matches the MIME type, which we have received anyway. - // Actual parsing of anything should be deferred.) - if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. - return nil, "", "", fmt.Errorf("Source image rejected: %w", err) - } - src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage) - if err != nil { - return nil, "", "", fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err) - } - - // If the destination is a digested reference, make a note of that, determine what digest value we're - // expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's - // one item from a manifest list that matches it, accept that as a match. - destIsDigestedReference := false - if named := c.dest.Reference().DockerReference(); named != nil { - if digested, ok := named.(reference.Digested); ok { - destIsDigestedReference = true - matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest()) - if err != nil { - return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err) - } - if !matches { - manifestList, _, err := unparsedToplevel.Manifest(ctx) - if err != nil { - return nil, "", "", fmt.Errorf("reading manifest from source image: %w", err) - } - matches, err = manifest.MatchesDigest(manifestList, digested.Digest()) - if err != nil { - return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err) - } - if !matches { - return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference") - } - } - } - } - - if err := checkImageDestinationForCurrentRuntime(ctx, options.DestinationCtx, src, c.dest); err != nil { - return nil, "", "", err - } - - sigs, err := c.sourceSignatures(ctx, src, options, - "Getting image source signatures", - "Checking if image destination supports signatures") - if err != nil { - return nil, "", "", err - } - - // Determine if we're allowed to modify the manifest. - // If we can, set to the empty string. If we can't, set to the reason why. - // Compare, and perhaps keep in sync with, the version in copyMultipleImages. - cannotModifyManifestReason := "" - if len(sigs) > 0 { - cannotModifyManifestReason = "Would invalidate signatures" - } - if destIsDigestedReference { - cannotModifyManifestReason = "Destination specifies a digest" - } - if options.PreserveDigests { - cannotModifyManifestReason = "Instructed to preserve digests" - } - - ic := imageCopier{ - c: c, - manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, - src: src, - // diffIDsAreNeeded is computed later - cannotModifyManifestReason: cannotModifyManifestReason, - ociEncryptLayers: options.OciEncryptLayers, - } - // Decide whether we can substitute blobs with semantic equivalents: - // - Don’t do that if we can’t modify the manifest at all - // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. 
- // This may be too conservative, but for now, better safe than sorry, _especially_ on the len(c.signers) != 0 path: - // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. - // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk - // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, - // and we would reuse and sign it. - ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0 - - if err := ic.updateEmbeddedDockerReference(); err != nil { - return nil, "", "", err - } - - destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil - - manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{ - srcMIMEType: ic.src.ManifestMIMEType, - destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(), - forceManifestMIMEType: options.ForceManifestMIMEType, - requiresOCIEncryption: destRequiresOciEncryption, - cannotModifyManifestReason: ic.cannotModifyManifestReason, - }) - if err != nil { - return nil, "", "", err - } - // We set up this part of ic.manifestUpdates quite early, not just around the - // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code - // (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based - // on the expected destination format. - if manifestConversionPlan.preferredMIMETypeNeedsConversion { - ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType - } - - // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. - ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) - - // If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal - if options.OptimizeDestinationImageAlreadyExists { - shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible - noPendingManifestUpdates := ic.noPendingManifestUpdates() - - logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates) - if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates { - isSrcDestManifestEqual, retManifest, retManifestType, retManifestDigest, err := compareImageDestinationManifestEqual(ctx, options, src, targetInstance, c.dest) - if err != nil { - logrus.Warnf("Failed to compare destination image manifest: %v", err) - return nil, "", "", err - } - - if isSrcDestManifestEqual { - c.Printf("Skipping: image already present at destination\n") - return retManifest, retManifestType, retManifestDigest, nil - } - } - } - - if err := ic.copyLayers(ctx); err != nil { - return nil, "", "", err - } - - // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; - // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support - // without actually trying to upload something and getting a types.ManifestTypeRejectedError. 
- // So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if - // we're altering how they're compressed. If the process succeeds, fine… - manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) - retManifestType = manifestConversionPlan.preferredMIMEType - if err != nil { - logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err) - // … if it fails, and the failure is either because the manifest is rejected by the registry, or - // because we failed to create a manifest of the specified type because the specific manifest type - // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may - // have other options available that could still succeed. - var manifestTypeRejectedError types.ManifestTypeRejectedError - var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError - isManifestRejected := errors.As(err, &manifestTypeRejectedError) - isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError) - if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 { - // We don’t have other options. - // In principle the code below would handle this as well, but the resulting error message is fairly ugly. - // Don’t bother the user with MIME types if we have no choice. - return nil, "", "", err - } - // If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType. - // So if we are here, we will definitely be trying to convert the manifest. - // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason, - // so let’s bail out early and with a better error message. - if ic.cannotModifyManifestReason != "" { - return nil, "", "", fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err) - } - - // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. - errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)} - for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates { - logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) - ic.manifestUpdates.ManifestMIMEType = manifestMIMEType - attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) - if err != nil { - logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) - errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err)) - continue - } - - // We have successfully uploaded a manifest. - manifestBytes = attemptedManifest - retManifestDigest = attemptedManifestDigest - retManifestType = manifestMIMEType - errs = nil // Mark this as a success so that we don't abort below. - break - } - if errs != nil { - return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) - } - } - if targetInstance != nil { - targetInstance = &retManifestDigest - } - - newSigs, err := c.createSignatures(ctx, manifestBytes, options.SignIdentity) - if err != nil { - return nil, "", "", err - } - sigs = append(sigs, newSigs...) 
- - c.Printf("Storing signatures\n") - if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil { - return nil, "", "", fmt.Errorf("writing signatures: %w", err) - } - - return manifestBytes, retManifestType, retManifestDigest, nil -} - -// Printf writes a formatted string to c.reportWriter. -// Note that the method name Printf is not entirely arbitrary: (go tool vet) -// has a built-in list of functions/methods (whatever object they are for) -// which have their format strings checked; for other names we would have -// to pass a parameter to every (go tool vet) invocation. -func (c *copier) Printf(format string, a ...interface{}) { - fmt.Fprintf(c.reportWriter, format, a...) -} - -// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary. -func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error { - if dest.MustMatchRuntimeOS() { - c, err := src.OCIConfig(ctx) - if err != nil { - return fmt.Errorf("parsing image configuration: %w", err) - } - wantedPlatforms, err := platform.WantedPlatforms(sys) - if err != nil { - return fmt.Errorf("getting current platform information %#v: %w", sys, err) - } - - options := newOrderedSet() - match := false - for _, wantedPlatform := range wantedPlatforms { - // Waiting for https://github.com/opencontainers/image-spec/pull/777 : - // This currently can’t use image.MatchesPlatform because we don’t know what to use - // for image.Variant. - if wantedPlatform.OS == c.OS && wantedPlatform.Architecture == c.Architecture { - match = true - break - } - options.append(fmt.Sprintf("%s+%s", wantedPlatform.OS, wantedPlatform.Architecture)) - } - if !match { - logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q, expecting one of %q", - c.OS, c.Architecture, strings.Join(options.list, ", ")) - } - } - return nil -} - -// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. -func (ic *imageCopier) updateEmbeddedDockerReference() error { - if ic.c.dest.IgnoresEmbeddedDockerReference() { - return nil // Destination would prefer us not to update the embedded reference. - } - destRef := ic.c.dest.Reference().DockerReference() - if destRef == nil { - return nil // Destination does not care about Docker references - } - if !ic.src.EmbeddedDockerReferenceConflicts(destRef) { - return nil // No reference embedded in the manifest, or it matches destRef already. - } - - if ic.cannotModifyManifestReason != "" { - return fmt.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q", - transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason) - } - ic.manifestUpdates.EmbeddedDockerReference = destRef - return nil -} - -func (ic *imageCopier) noPendingManifestUpdates() bool { - return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) + return slices.ContainsFunc(mtypes, manifest.MIMETypeIsMultiImage) } // isTTY returns true if the io.Writer is a file and a tty. @@ -891,458 +351,3 @@ func isTTY(w io.Writer) bool { } return false } - -// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "". 
-func (ic *imageCopier) copyLayers(ctx context.Context) error { - srcInfos := ic.src.LayerInfos() - numLayers := len(srcInfos) - updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx) - if err != nil { - return err - } - srcInfosUpdated := false - // If we only need to check authorization, no updates required. - if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { - if ic.cannotModifyManifestReason != "" { - return fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason) - } - srcInfos = updatedSrcInfos - srcInfosUpdated = true - } - - type copyLayerData struct { - destInfo types.BlobInfo - diffID digest.Digest - err error - } - - // The manifest is used to extract the information whether a given - // layer is empty. - man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType) - if err != nil { - return err - } - manifestLayerInfos := man.LayerInfos() - - // copyGroup is used to determine if all layers are copied - copyGroup := sync.WaitGroup{} - - data := make([]copyLayerData, numLayers) - copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) { - defer ic.c.concurrentBlobCopiesSemaphore.Release(1) - defer copyGroup.Done() - cld := copyLayerData{} - if !ic.c.downloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { - // DiffIDs are, currently, needed only when converting from schema1. - // In which case src.LayerInfos will not have URLs because schema1 - // does not support them. - if ic.diffIDsAreNeeded { - cld.err = errors.New("getting DiffID for foreign layers is unimplemented") - } else { - cld.destInfo = srcLayer - logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) - } - } else { - cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef, manifestLayerInfos[index].EmptyLayer) - } - data[index] = cld - } - - // Create layer Encryption map - encLayerBitmap := map[int]bool{} - var encryptAll bool - if ic.ociEncryptLayers != nil { - encryptAll = len(*ic.ociEncryptLayers) == 0 - totalLayers := len(srcInfos) - for _, l := range *ic.ociEncryptLayers { - // if layer is negative, it is reverse indexed. - encLayerBitmap[(totalLayers+l)%totalLayers] = true - } - - if encryptAll { - for i := 0; i < len(srcInfos); i++ { - encLayerBitmap[i] = true - } - } - } - - if err := func() error { // A scope for defer - progressPool := ic.c.newProgressPool() - defer progressPool.Wait() - - // Ensure we wait for all layers to be copied. progressPool.Wait() must not be called while any of the copyLayerHelpers interact with the progressPool. - defer copyGroup.Wait() - - for i, srcLayer := range srcInfos { - err = ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1) - if err != nil { - // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. - return fmt.Errorf("copying layer: %w", err) - } - copyGroup.Add(1) - go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool, ic.c.rawSource.Reference().DockerReference()) - } - - // A call to copyGroup.Wait() is done at this point by the defer above. 
- return nil - }(); err != nil { - return err - } - - destInfos := make([]types.BlobInfo, numLayers) - diffIDs := make([]digest.Digest, numLayers) - for i, cld := range data { - if cld.err != nil { - return cld.err - } - destInfos[i] = cld.destInfo - diffIDs[i] = cld.diffID - } - - // WARNING: If you are adding new reasons to change ic.manifestUpdates, also update the - // OptimizeDestinationImageAlreadyExists short-circuit conditions - ic.manifestUpdates.InformationOnly.LayerInfos = destInfos - if ic.diffIDsAreNeeded { - ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs - } - if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { - ic.manifestUpdates.LayerInfos = destInfos - } - return nil -} - -// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields) -func layerDigestsDiffer(a, b []types.BlobInfo) bool { - if len(a) != len(b) { - return true - } - for i := range a { - if a[i].Digest != b[i].Digest { - return true - } - } - return false -} - -// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary, -// stores the resulting config and manifest to the destination, and returns the stored manifest -// and its digest. -func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { - var pendingImage types.Image = ic.src - if !ic.noPendingManifestUpdates() { - if ic.cannotModifyManifestReason != "" { - return nil, "", fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason) - } - if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { - // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. - // So, this can only happen if we are trying to upload using one of the other MIME type candidates. - // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise - // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. - // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. - // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates. - return nil, "", fmt.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) - } - pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates) - if err != nil { - return nil, "", fmt.Errorf("creating an updated image manifest: %w", err) - } - pendingImage = pi - } - man, _, err := pendingImage.Manifest(ctx) - if err != nil { - return nil, "", fmt.Errorf("reading manifest: %w", err) - } - - if err := ic.copyConfig(ctx, pendingImage); err != nil { - return nil, "", err - } - - ic.c.Printf("Writing manifest to image destination\n") - manifestDigest, err := manifest.Digest(man) - if err != nil { - return nil, "", err - } - if instanceDigest != nil { - instanceDigest = &manifestDigest - } - if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { - logrus.Debugf("Error %v while writing manifest %q", err, string(man)) - return nil, "", fmt.Errorf("writing manifest: %w", err) - } - return man, manifestDigest, nil -} - -// copyConfig copies config.json, if any, from src to dest. 
-func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error { - srcInfo := src.ConfigInfo() - if srcInfo.Digest != "" { - if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { - // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. - return fmt.Errorf("copying config: %w", err) - } - defer ic.c.concurrentBlobCopiesSemaphore.Release(1) - - destInfo, err := func() (types.BlobInfo, error) { // A scope for defer - progressPool := ic.c.newProgressPool() - defer progressPool.Wait() - bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done") - defer bar.Abort(false) - ic.c.printCopyInfo("config", srcInfo) - - configBlob, err := src.ConfigBlob(ctx) - if err != nil { - return types.BlobInfo{}, fmt.Errorf("reading config blob %s: %w", srcInfo.Digest, err) - } - - destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false) - if err != nil { - return types.BlobInfo{}, err - } - - bar.mark100PercentComplete() - return destInfo, nil - }() - if err != nil { - return err - } - if destInfo.Digest != srcInfo.Digest { - return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest) - } - } - return nil -} - -// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine. -// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation. -type diffIDResult struct { - digest digest.Digest - err error -} - -// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it, -// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded -// srcRef can be used as an additional hint to the destination during checking whether a layer can be reused but srcRef can be nil. -func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) { - // If the srcInfo doesn't contain compression information, try to compute it from the - // MediaType, which was either read from a manifest by way of LayerInfos() or constructed - // by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob, - // the BlobInfo we return will be passed to UpdatedImage() and then to UpdateLayerInfos(), - // which uses the compression information to compute the updated MediaType values. - // (Sadly UpdatedImage() is documented to not update MediaTypes from - // ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.) - // - // This MIME type → compression mapping belongs in manifest-specific code in our manifest - // package (but we should preferably replace/change UpdatedImage instead of productizing - // this workaround). 
-	if srcInfo.CompressionAlgorithm == nil {
-		switch srcInfo.MediaType {
-		case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
-			srcInfo.CompressionAlgorithm = &compression.Gzip
-		case imgspecv1.MediaTypeImageLayerZstd:
-			srcInfo.CompressionAlgorithm = &compression.Zstd
-		}
-	}
-
-	ic.c.printCopyInfo("blob", srcInfo)
-
-	cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
-	diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
-	// When encrypting or decrypting, only use the simple code path. We might be able to optimize more
-	// (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
-	// but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
-	encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
-	canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
-
-	// Don’t read the layer from the source if we already have the blob, and optimizations are acceptable.
-	if canAvoidProcessingCompleteLayer {
-		canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
-		logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
-			srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
-		canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType)
-		// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
-		// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
-		// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
-		// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
-		// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
-		// the ImageDestination interface lets us pass in.
-		reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
-			Cache: ic.c.blobInfoCache,
-			CanSubstitute: canSubstitute,
-			EmptyLayer: emptyLayer,
-			LayerIndex: &layerIndex,
-			SrcRef: srcRef,
-		})
-		if err != nil {
-			return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
-		}
-		if reused {
-			logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
-			func() { // A scope for defer
-				bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: blobInfo.Digest, Size: 0}, "blob", "skipped: already exists")
-				defer bar.Abort(false)
-				bar.mark100PercentComplete()
-			}()
-
-			// Throw an event that the layer has been skipped
-			if ic.c.progress != nil && ic.c.progressInterval > 0 {
-				ic.c.progress <- types.ProgressProperties{
-					Event: types.ProgressEventSkipped,
-					Artifact: srcInfo,
-				}
-			}
-
-			// If the reused blob has the same digest as the one we asked for, but
-			// the transport didn't/couldn't supply compression info, fill it in based
-			// on what we know from the srcInfos we were given.
-			// If the srcInfos came from LayerInfosForCopy(), then UpdatedImage() will
-			// call UpdateLayerInfos(), which uses this information to compute the
-			// MediaType value for the updated layer infos, and if the transport
-			// didn't pass the information along from its input to its output, then
-			// it can derive the MediaType incorrectly.
-			if blobInfo.Digest == srcInfo.Digest && blobInfo.CompressionAlgorithm == nil {
-				blobInfo.CompressionOperation = srcInfo.CompressionOperation
-				blobInfo.CompressionAlgorithm = srcInfo.CompressionAlgorithm
-			}
-			return blobInfo, cachedDiffID, nil
-		}
-	}
-
-	// A partial pull is managed by the destination storage, which decides what portions
-	// of the source file are not known yet and must be fetched.
-	// Attempt a partial pull only when the source allows retrieving a blob partially and
-	// the destination has support for it.
-	if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
-		if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
-			bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
-			hideProgressBar := true
-			defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
-				bar.Abort(hideProgressBar)
-			}()
-
-			proxy := blobChunkAccessorProxy{
-				wrapped: ic.c.rawSource,
-				bar: bar,
-			}
-			info, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
-			if err == nil {
-				if srcInfo.Size != -1 {
-					bar.SetRefill(srcInfo.Size - bar.Current())
-				}
-				bar.mark100PercentComplete()
-				hideProgressBar = false
-				logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
-				return true, info
-			}
-			logrus.Debugf("Failed to retrieve partial blob: %v", err)
-			return false, types.BlobInfo{}
-		}(); reused {
-			return blobInfo, cachedDiffID, nil
-		}
-	}
-
-	// Fallback: copy the layer, computing the diffID if we need to do so
-	return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
-		bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
-		defer bar.Abort(false)
-
-		srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
-		if err != nil {
-			return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
-		}
-		defer srcStream.Close()
-
-		blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
-		if err != nil {
-			return types.BlobInfo{}, "", err
-		}
-
-		diffID := cachedDiffID
-		if diffIDIsNeeded {
-			select {
-			case <-ctx.Done():
-				return types.BlobInfo{}, "", ctx.Err()
-			case diffIDResult := <-diffIDChan:
-				if diffIDResult.err != nil {
-					return types.BlobInfo{}, "", fmt.Errorf("computing layer DiffID: %w", diffIDResult.err)
-				}
-				logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
-				// Don’t record any associations that involve encrypted data. This is a bit crude,
-				// some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
-				// might be safe, but it’s not trivially obvious, so let’s be conservative for now.
-				// This crude approach also means we don’t need to record whether a blob is encrypted
-				// in the blob info cache (which would probably be necessary for any more complex logic),
-				// and the simplicity is attractive.
-				if !encryptingOrDecrypting {
-					// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
-					// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
-					ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
-				}
-				diffID = diffIDResult.digest
-			}
-		}
-
-		bar.mark100PercentComplete()
-		return blobInfo, diffID, nil
-	}()
-}
-
-// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
-// It copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
-// perhaps (de/re/)compressing the stream,
-// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
-func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
-	diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
-	var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
-	var diffIDChan chan diffIDResult
-
-	err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
-	if diffIDIsNeeded {
-		diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
-		pipeReader, pipeWriter := io.Pipe()
-		defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
-			_ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
-		}()
-
-		getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer {
-			// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
-			// reading from the pipe has failed, we don’t really care.
-			// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
-			// the return value includes an error indication, which we do check.
-			//
-			// If this never gets called, pipeReader will not be used anywhere, but pipeWriter will only be
-			// closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
-			go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
-			return pipeWriter
-		}
-	}
-
-	blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
-	return blobInfo, diffIDChan, err
-	// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
-}
-
-// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
-func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) {
-	result := diffIDResult{
-		digest: "",
-		err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
-	}
-	defer func() { dest <- result }()
-	defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead.
- - result.digest, result.err = computeDiffID(layerStream, decompressor) -} - -// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest. -func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) { - if decompressor != nil { - s, err := decompressor(stream) - if err != nil { - return "", err - } - defer s.Close() - stream = s - } - - return digest.Canonical.FromReader(stream) -} diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go index 5eae8bfcd..54aca9e57 100644 --- a/vendor/github.com/containers/image/v5/copy/encryption.go +++ b/vendor/github.com/containers/image/v5/copy/encryption.go @@ -7,6 +7,8 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/ocicrypt" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" ) // isOciEncrypted returns a bool indicating if a mediatype is encrypted @@ -18,12 +20,9 @@ func isOciEncrypted(mediatype string) bool { // isEncrypted checks if an image is encrypted func isEncrypted(i types.Image) bool { layers := i.LayerInfos() - for _, l := range layers { - if isOciEncrypted(l.MediaType) { - return true - } - } - return false + return slices.ContainsFunc(layers, func(l types.BlobInfo) bool { + return isOciEncrypted(l.MediaType) + }) } // bpDecryptionStepData contains data that the copy pipeline needs about the decryption step. @@ -47,11 +46,9 @@ func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types. stream.reader = reader stream.info.Digest = decryptedDigest stream.info.Size = -1 - for k := range stream.info.Annotations { - if strings.HasPrefix(k, "org.opencontainers.image.enc") { - delete(stream.info.Annotations, k) - } - } + maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool { + return strings.HasPrefix(k, "org.opencontainers.image.enc") + }) return &bpDecryptionStepData{ decrypting: true, }, nil @@ -122,8 +119,6 @@ func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *ty if *annotations == nil { *annotations = map[string]string{} } - for k, v := range encryptAnnotations { - (*annotations)[k] = v - } + maps.Copy(*annotations, encryptAnnotations) return nil } diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go index df12e50c0..a35ea4220 100644 --- a/vendor/github.com/containers/image/v5/copy/manifest.go +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -6,9 +6,11 @@ import ( "fmt" "strings" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. @@ -19,7 +21,7 @@ var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, man // orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once. type orderedSet struct { list []string - included map[string]struct{} + included *set.Set[string] } // newOrderedSet creates a correctly initialized orderedSet. 
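Several hunks in this patch replace hand-rolled loops with the generic helpers from golang.org/x/exp: slices.Contains/ContainsFunc for linear searches, and maps.DeleteFunc/maps.Copy for map edits. A self-contained sketch of the two patterns used above (the media-type strings are illustrative examples only):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/maps"
	"golang.org/x/exp/slices"
)

func main() {
	// slices.ContainsFunc replaces the classic "loop until found" pattern,
	// as in isEncrypted and supportsMultipleImages above.
	layers := []string{"application/vnd.oci.image.layer.v1.tar+gzip+encrypted"}
	fmt.Println(slices.ContainsFunc(layers, func(mt string) bool {
		return strings.HasSuffix(mt, "+encrypted")
	})) // true

	// maps.DeleteFunc replaces the "range and delete matching keys" loop
	// that blobPipelineDecryptionStep used for ocicrypt annotations.
	annotations := map[string]string{
		"org.opencontainers.image.enc.keys.jwe": "...",
		"org.example.kept":                      "v",
	}
	maps.DeleteFunc(annotations, func(k, _ string) bool {
		return strings.HasPrefix(k, "org.opencontainers.image.enc")
	})
	fmt.Println(annotations) // map[org.example.kept:v]
}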
@@ -27,15 +29,15 @@ type orderedSet struct { func newOrderedSet() *orderedSet { return &orderedSet{ list: []string{}, - included: map[string]struct{}{}, + included: set.New[string](), } } // append adds s to the end of os, only if it is not included already. func (os *orderedSet) append(s string) { - if _, ok := os.included[s]; !ok { + if !os.included.Contains(s) { os.list = append(os.list, s) - os.included[s] = struct{}{} + os.included.Add(s) } } @@ -80,10 +82,10 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest otherMIMETypeCandidates: []string{}, }, nil } - supportedByDest := map[string]struct{}{} + supportedByDest := set.New[string]() for _, t := range destSupportedManifestMIMETypes { if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) { - supportedByDest[t] = struct{}{} + supportedByDest.Add(t) } } @@ -96,7 +98,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest prioritizedTypes := newOrderedSet() // First of all, prefer to keep the original manifest unmodified. - if _, ok := supportedByDest[srcType]; ok { + if supportedByDest.Contains(srcType) { prioritizedTypes.append(srcType) } if in.cannotModifyManifestReason != "" { @@ -113,7 +115,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest // Then use our list of preferred types. for _, t := range preferredManifestMIMETypes { - if _, ok := supportedByDest[t]; ok { + if supportedByDest.Contains(t) { prioritizedTypes.append(t) } } @@ -166,11 +168,8 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport prioritizedTypes := newOrderedSet() // The first priority is the current type, if it's in the list, since that lets us avoid a // conversion that isn't strictly necessary. - for _, t := range destSupportedMIMETypes { - if t == currentListMIMEType { - prioritizedTypes.append(currentListMIMEType) - break - } + if slices.Contains(destSupportedMIMETypes, currentListMIMEType) { + prioritizedTypes.append(currentListMIMEType) } // Pick out the other list types that we support. for _, t := range destSupportedMIMETypes { diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go new file mode 100644 index 000000000..097a18855 --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/multiple.go @@ -0,0 +1,198 @@ +package copy + +import ( + "bytes" + "context" + "errors" + "fmt" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" + internalManifest "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/signature" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" +) + +// copyMultipleImages copies some or all of an image list's instances, using +// policyContext to validate source image admissibility. +func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) { + // Parse the list and get a copy of the original value after it's re-encoded. 
+ manifestList, manifestType, err := unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, fmt.Errorf("reading manifest list: %w", err) + } + originalList, err := internalManifest.ListFromBlob(manifestList, manifestType) + if err != nil { + return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err) + } + updatedList := originalList.CloneInternal() + + sigs, err := c.sourceSignatures(ctx, unparsedToplevel, options, + "Getting image list signatures", + "Checking if image list destination supports signatures") + if err != nil { + return nil, err + } + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. + destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + matches, err := manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return nil, fmt.Errorf("computing digest of source image's manifest: %w", err) + } + if !matches { + return nil, errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + + // Determine if we're allowed to modify the manifest list. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copySingleImage. + cannotModifyManifestListReason := "" + if len(sigs) > 0 { + cannotModifyManifestListReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestListReason = "Destination specifies a digest" + } + if options.PreserveDigests { + cannotModifyManifestListReason = "Instructed to preserve digests" + } + + // Determine if we'll need to convert the manifest list to a different format. + forceListMIMEType := options.ForceManifestMIMEType + switch forceListMIMEType { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + forceListMIMEType = manifest.DockerV2ListMediaType + case imgspecv1.MediaTypeImageManifest: + forceListMIMEType = imgspecv1.MediaTypeImageIndex + } + selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType) + if err != nil { + return nil, fmt.Errorf("determining manifest list type to write to destination: %w", err) + } + if selectedListType != originalList.MIMEType() { + if cannotModifyManifestListReason != "" { + return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason) + } + } + + // Copy each image, or just the ones we want to copy, in turn. 
+ instanceDigests := updatedList.Instances() + imagesToCopy := len(instanceDigests) + if options.ImageListSelection == CopySpecificImages { + imagesToCopy = len(options.Instances) + } + c.Printf("Copying %d of %d images in list\n", imagesToCopy, len(instanceDigests)) + updates := make([]manifest.ListUpdate, len(instanceDigests)) + instancesCopied := 0 + for i, instanceDigest := range instanceDigests { + if options.ImageListSelection == CopySpecificImages && + !slices.Contains(options.Instances, instanceDigest) { + update, err := updatedList.Instance(instanceDigest) + if err != nil { + return nil, err + } + logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) + // Record the digest/size/type of the manifest that we didn't copy. + updates[i] = update + continue + } + logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests)) + c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy) + unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest) + updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest) + if err != nil { + return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", instancesCopied+1, imagesToCopy, err) + } + instancesCopied++ + // Record the result of a possible conversion here. + update := manifest.ListUpdate{ + Digest: updatedManifestDigest, + Size: int64(len(updatedManifest)), + MediaType: updatedManifestType, + } + updates[i] = update + } + + // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made. + if err = updatedList.UpdateInstances(updates); err != nil { + return nil, fmt.Errorf("updating manifest list: %w", err) + } + + // Iterate through supported list types, preferred format first. + c.Printf("Writing manifest list to image destination\n") + var errs []string + for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) { + var attemptedList internalManifest.ListPublic = updatedList + + logrus.Debugf("Trying to use manifest list type %s…", thisListType) + + // Perform the list conversion, if we need one. + if thisListType != updatedList.MIMEType() { + attemptedList, err = updatedList.ConvertToMIMEType(thisListType) + if err != nil { + return nil, fmt.Errorf("converting manifest list to list with MIME type %q: %w", thisListType, err) + } + } + + // Check if the updates or a type conversion meaningfully changed the list of images + // by serializing them both so that we can compare them. + attemptedManifestList, err := attemptedList.Serialize() + if err != nil { + return nil, fmt.Errorf("encoding updated manifest list (%q: %#v): %w", updatedList.MIMEType(), updatedList.Instances(), err) + } + originalManifestList, err := originalList.Serialize() + if err != nil { + return nil, fmt.Errorf("encoding original manifest list for comparison (%q: %#v): %w", originalList.MIMEType(), originalList.Instances(), err) + } + + // If we can't just use the original value, but we have to change it, flag an error. 
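The serialize-and-compare step below works because manifest digests are computed over the exact serialized bytes: even a semantically neutral re-encoding changes the image's identity. A small illustration:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/containers/image/v5/manifest"
)

func main() {
	original := []byte(`{"schemaVersion":2,"manifests":[]}`)
	reencoded := []byte(`{"schemaVersion": 2, "manifests": []}`) // same JSON, different bytes

	if !bytes.Equal(original, reencoded) {
		d1, _ := manifest.Digest(original)  // digest of the exact serialized bytes
		d2, _ := manifest.Digest(reencoded) // different, despite equal content
		fmt.Printf("digests differ:\n%s\n%s\n", d1, d2)
	}
}
```

This is why the code below falls back to the original manifestList bytes whenever the comparison shows no meaningful change.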
+ if !bytes.Equal(attemptedManifestList, originalManifestList) { + if cannotModifyManifestListReason != "" { + return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason) + } + logrus.Debugf("Manifest list has been updated") + } else { + // We can just use the original value, so use it instead of the one we just rebuilt, so that we don't change the digest. + attemptedManifestList = manifestList + } + + // Save the manifest list. + err = c.dest.PutManifest(ctx, attemptedManifestList, nil) + if err != nil { + logrus.Debugf("Upload of manifest list type %s failed: %v", thisListType, err) + errs = append(errs, fmt.Sprintf("%s(%v)", thisListType, err)) + continue + } + errs = nil + manifestList = attemptedManifestList + break + } + if errs != nil { + return nil, fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", ")) + } + + // Sign the manifest list. + newSigs, err := c.createSignatures(ctx, manifestList, options.SignIdentity) + if err != nil { + return nil, err + } + sigs = append(sigs, newSigs...) + + c.Printf("Storing list signatures\n") + if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil { + return nil, fmt.Errorf("writing signatures: %w", err) + } + + return manifestList, nil +} diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go index 85676f01c..25f246363 100644 --- a/vendor/github.com/containers/image/v5/copy/progress_bars.go +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -7,8 +7,8 @@ import ( "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/types" - "github.com/vbauerster/mpb/v7" - "github.com/vbauerster/mpb/v7/decor" + "github.com/vbauerster/mpb/v8" + "github.com/vbauerster/mpb/v8/decor" ) // newProgressPool creates a *mpb.Progress. @@ -120,7 +120,7 @@ func (bar *progressBar) mark100PercentComplete() { bar.SetCurrent(bar.originalSize) // This triggers the completion condition. 
} else { // -1 = unknown size - // 0 is somewhat of a a special case: Unlike c/image, where 0 is a definite known + // 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known // size (possible at least in theory), in mpb, zero-sized progress bars are treated // as unknown size, in particular they are not configured to be marked as // complete on bar.Current() reaching bar.total (because that would happen already diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go new file mode 100644 index 000000000..9afdea73d --- /dev/null +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -0,0 +1,818 @@ +package copy + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/image" + "github.com/containers/image/v5/internal/pkg/platform" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/set" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "github.com/vbauerster/mpb/v8" + "golang.org/x/exp/slices" +) + +// imageCopier tracks state specific to a single image (possibly an item of a manifest list) +type imageCopier struct { + c *copier + manifestUpdates *types.ManifestUpdateOptions + src *image.SourcedImage + diffIDsAreNeeded bool + cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can + canSubstituteBlobs bool + compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil. + compressionLevel *int + ociEncryptLayers *[]int +} + +// copySingleImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate +// source image admissibility. +func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest) (retManifest []byte, retManifestType string, retManifestDigest digest.Digest, retErr error) { + // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list. + // Make sure we fail cleanly in such cases. + multiImage, err := isMultiImage(ctx, unparsedImage) + if err != nil { + // FIXME FIXME: How to name a reference for the sub-image? + return nil, "", "", fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err) + } + if multiImage { + return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") + } + + // Please keep this policy check BEFORE reading any other information about the image. + // (The multiImage check above only matches the MIME type, which we have received anyway. + // Actual parsing of anything should be deferred.) + if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. 
+ return nil, "", "", fmt.Errorf("Source image rejected: %w", err) + } + src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage) + if err != nil { + return nil, "", "", fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err) + } + + // If the destination is a digested reference, make a note of that, determine what digest value we're + // expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's + // one item from a manifest list that matches it, accept that as a match. + destIsDigestedReference := false + if named := c.dest.Reference().DockerReference(); named != nil { + if digested, ok := named.(reference.Digested); ok { + destIsDigestedReference = true + matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest()) + if err != nil { + return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err) + } + if !matches { + manifestList, _, err := unparsedToplevel.Manifest(ctx) + if err != nil { + return nil, "", "", fmt.Errorf("reading manifest from source image: %w", err) + } + matches, err = manifest.MatchesDigest(manifestList, digested.Digest()) + if err != nil { + return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err) + } + if !matches { + return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference") + } + } + } + } + + if err := checkImageDestinationForCurrentRuntime(ctx, options.DestinationCtx, src, c.dest); err != nil { + return nil, "", "", err + } + + sigs, err := c.sourceSignatures(ctx, src, options, + "Getting image source signatures", + "Checking if image destination supports signatures") + if err != nil { + return nil, "", "", err + } + + // Determine if we're allowed to modify the manifest. + // If we can, set to the empty string. If we can't, set to the reason why. + // Compare, and perhaps keep in sync with, the version in copyMultipleImages. + cannotModifyManifestReason := "" + if len(sigs) > 0 { + cannotModifyManifestReason = "Would invalidate signatures" + } + if destIsDigestedReference { + cannotModifyManifestReason = "Destination specifies a digest" + } + if options.PreserveDigests { + cannotModifyManifestReason = "Instructed to preserve digests" + } + + ic := imageCopier{ + c: c, + manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, + src: src, + // diffIDsAreNeeded is computed later + cannotModifyManifestReason: cannotModifyManifestReason, + ociEncryptLayers: options.OciEncryptLayers, + } + if options.DestinationCtx != nil { + // Note that compressionFormat and compressionLevel can be nil. + ic.compressionFormat = options.DestinationCtx.CompressionFormat + ic.compressionLevel = options.DestinationCtx.CompressionLevel + } + // Decide whether we can substitute blobs with semantic equivalents: + // - Don’t do that if we can’t modify the manifest at all + // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. + // This may be too conservative, but for now, better safe than sorry, _especially_ on the len(c.signers) != 0 path: + // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended. 
+ // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk + // that the compressed version coming from a third party may be designed to attack some other decompressor implementation, + // and we would reuse and sign it. + ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0 + + if err := ic.updateEmbeddedDockerReference(); err != nil { + return nil, "", "", err + } + + destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil + + manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{ + srcMIMEType: ic.src.ManifestMIMEType, + destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(), + forceManifestMIMEType: options.ForceManifestMIMEType, + requiresOCIEncryption: destRequiresOciEncryption, + cannotModifyManifestReason: ic.cannotModifyManifestReason, + }) + if err != nil { + return nil, "", "", err + } + // We set up this part of ic.manifestUpdates quite early, not just around the + // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code + // (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based + // on the expected destination format. + if manifestConversionPlan.preferredMIMETypeNeedsConversion { + ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType + } + + // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. + ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) + + // If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal + if options.OptimizeDestinationImageAlreadyExists { + shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible + noPendingManifestUpdates := ic.noPendingManifestUpdates() + + logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates) + if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates { + isSrcDestManifestEqual, retManifest, retManifestType, retManifestDigest, err := compareImageDestinationManifestEqual(ctx, options, src, targetInstance, c.dest) + if err != nil { + logrus.Warnf("Failed to compare destination image manifest: %v", err) + return nil, "", "", err + } + + if isSrcDestManifestEqual { + c.Printf("Skipping: image already present at destination\n") + return retManifest, retManifestType, retManifestDigest, nil + } + } + } + + if err := ic.copyLayers(ctx); err != nil { + return nil, "", "", err + } + + // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; + // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support + // without actually trying to upload something and getting a types.ManifestTypeRejectedError. + // So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if + // we're altering how they're compressed. 
If the process succeeds, fine… + manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) + retManifestType = manifestConversionPlan.preferredMIMEType + if err != nil { + logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err) + // … if it fails, and the failure is either because the manifest is rejected by the registry, or + // because we failed to create a manifest of the specified type because the specific manifest type + // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may + // have other options available that could still succeed. + var manifestTypeRejectedError types.ManifestTypeRejectedError + var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError + isManifestRejected := errors.As(err, &manifestTypeRejectedError) + isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError) + if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 { + // We don’t have other options. + // In principle the code below would handle this as well, but the resulting error message is fairly ugly. + // Don’t bother the user with MIME types if we have no choice. + return nil, "", "", err + } + // If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType. + // So if we are here, we will definitely be trying to convert the manifest. + // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason, + // so let’s bail out early and with a better error message. + if ic.cannotModifyManifestReason != "" { + return nil, "", "", fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err) + } + + // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. + errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)} + for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates { + logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) + ic.manifestUpdates.ManifestMIMEType = manifestMIMEType + attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance) + if err != nil { + logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) + errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err)) + continue + } + + // We have successfully uploaded a manifest. + manifestBytes = attemptedManifest + retManifestDigest = attemptedManifestDigest + retManifestType = manifestMIMEType + errs = nil // Mark this as a success so that we don't abort below. + break + } + if errs != nil { + return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) + } + } + if targetInstance != nil { + targetInstance = &retManifestDigest + } + + newSigs, err := c.createSignatures(ctx, manifestBytes, options.SignIdentity) + if err != nil { + return nil, "", "", err + } + sigs = append(sigs, newSigs...) 
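The fallback above reduces to a reusable shape: try the preferred type first, and only the two recoverable error classes justify moving on to the remaining candidates. A sketch under that assumption (uploadAs is a hypothetical stand-in for copyUpdatedConfigAndManifest):

```go
package copysketch

import (
	"errors"
	"fmt"
	"strings"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
)

// uploadWithFallback tries candidate MIME types in order, retrying only when
// the failure is a registry rejection or a compression incompatibility; any
// other error aborts immediately.
func uploadWithFallback(candidates []string, uploadAs func(mimeType string) error) error {
	var attempts []string
	for _, t := range candidates {
		err := uploadAs(t)
		if err == nil {
			return nil
		}
		var rejected types.ManifestTypeRejectedError
		var incompatible manifest.ManifestLayerCompressionIncompatibilityError
		if !errors.As(err, &rejected) && !errors.As(err, &incompatible) {
			return err // Not a format problem; other candidates won't help.
		}
		attempts = append(attempts, fmt.Sprintf("%s(%v)", t, err))
	}
	return fmt.Errorf("uploading manifest failed, attempted the following formats: %s", strings.Join(attempts, ", "))
}
```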
+
+ c.Printf("Storing signatures\n")
+ if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
+ return nil, "", "", fmt.Errorf("writing signatures: %w", err)
+ }
+
+ return manifestBytes, retManifestType, retManifestDigest, nil
+}
+
+// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
+func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
+ if dest.MustMatchRuntimeOS() {
+ c, err := src.OCIConfig(ctx)
+ if err != nil {
+ return fmt.Errorf("parsing image configuration: %w", err)
+ }
+ wantedPlatforms, err := platform.WantedPlatforms(sys)
+ if err != nil {
+ return fmt.Errorf("getting current platform information %#v: %w", sys, err)
+ }
+
+ options := newOrderedSet()
+ match := false
+ for _, wantedPlatform := range wantedPlatforms {
+ // Waiting for https://github.com/opencontainers/image-spec/pull/777 :
+ // This currently can’t use image.MatchesPlatform because we don’t know what to use
+ // for image.Variant.
+ if wantedPlatform.OS == c.OS && wantedPlatform.Architecture == c.Architecture {
+ match = true
+ break
+ }
+ options.append(fmt.Sprintf("%s+%s", wantedPlatform.OS, wantedPlatform.Architecture))
+ }
+ if !match {
+ logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q, expecting one of %q",
+ c.OS, c.Architecture, strings.Join(options.list, ", "))
+ }
+ }
+ return nil
+}
+
+// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
+func (ic *imageCopier) updateEmbeddedDockerReference() error {
+ if ic.c.dest.IgnoresEmbeddedDockerReference() {
+ return nil // Destination would prefer us not to update the embedded reference.
+ }
+ destRef := ic.c.dest.Reference().DockerReference()
+ if destRef == nil {
+ return nil // Destination does not care about Docker references
+ }
+ if !ic.src.EmbeddedDockerReferenceConflicts(destRef) {
+ return nil // No reference embedded in the manifest, or it matches destRef already.
+ }
+
+ if ic.cannotModifyManifestReason != "" {
+ return fmt.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q",
+ transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason)
+ }
+ ic.manifestUpdates.EmbeddedDockerReference = destRef
+ return nil
+}
+
+func (ic *imageCopier) noPendingManifestUpdates() bool {
+ return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly})
+}
+
+// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
+// (possibly remote) destination). It returns true, along with the destination's manifest, type, and digest, if they compare equal.
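The noPendingManifestUpdates helper just above relies on a compact reflect.DeepEqual trick: zero out everything except the informational side channel and see whether the struct survives. The same pattern on a toy struct:

```go
package updatesketch

import "reflect"

// updates is a toy stand-in for types.ManifestUpdateOptions: one field that
// counts as a pending change, one that is informational only.
type updates struct {
	MIMEType string
	Info     string // side-channel data; never a "pending update" by itself
}

// noPendingUpdates reports whether u is empty apart from its informational
// field, mirroring noPendingManifestUpdates above.
func noPendingUpdates(u updates) bool {
	return reflect.DeepEqual(u, updates{Info: u.Info})
}
```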
+func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) { + srcManifestDigest, err := manifest.Digest(src.ManifestBlob) + if err != nil { + return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err) + } + + destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx) + if err != nil { + logrus.Debugf("Unable to create destination image %s source: %v", dest.Reference(), err) + return false, nil, "", "", nil + } + + destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance) + if err != nil { + logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err) + return false, nil, "", "", nil + } + + destManifestDigest, err := manifest.Digest(destManifest) + if err != nil { + return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err) + } + + logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest) + if srcManifestDigest != destManifestDigest { + return false, nil, "", "", nil + } + + // Destination and source manifests, types and digests should all be equivalent + return true, destManifest, destManifestType, destManifestDigest, nil +} + +// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "". +func (ic *imageCopier) copyLayers(ctx context.Context) error { + srcInfos := ic.src.LayerInfos() + numLayers := len(srcInfos) + updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx) + if err != nil { + return err + } + srcInfosUpdated := false + if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { + if ic.cannotModifyManifestReason != "" { + return fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason) + } + srcInfos = updatedSrcInfos + srcInfosUpdated = true + } + + type copyLayerData struct { + destInfo types.BlobInfo + diffID digest.Digest + err error + } + + // The manifest is used to extract the information whether a given + // layer is empty. + man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType) + if err != nil { + return err + } + manifestLayerInfos := man.LayerInfos() + + // copyGroup is used to determine if all layers are copied + copyGroup := sync.WaitGroup{} + + data := make([]copyLayerData, numLayers) + copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) { + defer ic.c.concurrentBlobCopiesSemaphore.Release(1) + defer copyGroup.Done() + cld := copyLayerData{} + if !ic.c.downloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { + // DiffIDs are, currently, needed only when converting from schema1. + // In which case src.LayerInfos will not have URLs because schema1 + // does not support them. 
+ if ic.diffIDsAreNeeded { + cld.err = errors.New("getting DiffID for foreign layers is unimplemented") + } else { + cld.destInfo = srcLayer + logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name()) + } + } else { + cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef, manifestLayerInfos[index].EmptyLayer) + } + data[index] = cld + } + + // Decide which layers to encrypt + layersToEncrypt := set.New[int]() + var encryptAll bool + if ic.ociEncryptLayers != nil { + encryptAll = len(*ic.ociEncryptLayers) == 0 + totalLayers := len(srcInfos) + for _, l := range *ic.ociEncryptLayers { + // if layer is negative, it is reverse indexed. + layersToEncrypt.Add((totalLayers + l) % totalLayers) + } + + if encryptAll { + for i := 0; i < len(srcInfos); i++ { + layersToEncrypt.Add(i) + } + } + } + + if err := func() error { // A scope for defer + progressPool := ic.c.newProgressPool() + defer progressPool.Wait() + + // Ensure we wait for all layers to be copied. progressPool.Wait() must not be called while any of the copyLayerHelpers interact with the progressPool. + defer copyGroup.Wait() + + for i, srcLayer := range srcInfos { + err = ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1) + if err != nil { + // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. + return fmt.Errorf("copying layer: %w", err) + } + copyGroup.Add(1) + go copyLayerHelper(i, srcLayer, layersToEncrypt.Contains(i), progressPool, ic.c.rawSource.Reference().DockerReference()) + } + + // A call to copyGroup.Wait() is done at this point by the defer above. + return nil + }(); err != nil { + return err + } + + destInfos := make([]types.BlobInfo, numLayers) + diffIDs := make([]digest.Digest, numLayers) + for i, cld := range data { + if cld.err != nil { + return cld.err + } + destInfos[i] = cld.destInfo + diffIDs[i] = cld.diffID + } + + // WARNING: If you are adding new reasons to change ic.manifestUpdates, also update the + // OptimizeDestinationImageAlreadyExists short-circuit conditions + ic.manifestUpdates.InformationOnly.LayerInfos = destInfos + if ic.diffIDsAreNeeded { + ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs + } + if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { + ic.manifestUpdates.LayerInfos = destInfos + } + return nil +} + +// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields) +func layerDigestsDiffer(a, b []types.BlobInfo) bool { + return !slices.EqualFunc(a, b, func(a, b types.BlobInfo) bool { + return a.Digest == b.Digest + }) +} + +// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary, +// stores the resulting config and manifest to the destination, and returns the stored manifest +// and its digest. +func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) { + var pendingImage types.Image = ic.src + if !ic.noPendingManifestUpdates() { + if ic.cannotModifyManifestReason != "" { + return nil, "", fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason) + } + if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { + // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. 
+ // So, this can only happen if we are trying to upload using one of the other MIME type candidates. + // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise + // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. + // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. + // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates. + return nil, "", fmt.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) + } + pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates) + if err != nil { + return nil, "", fmt.Errorf("creating an updated image manifest: %w", err) + } + pendingImage = pi + } + man, _, err := pendingImage.Manifest(ctx) + if err != nil { + return nil, "", fmt.Errorf("reading manifest: %w", err) + } + + if err := ic.copyConfig(ctx, pendingImage); err != nil { + return nil, "", err + } + + ic.c.Printf("Writing manifest to image destination\n") + manifestDigest, err := manifest.Digest(man) + if err != nil { + return nil, "", err + } + if instanceDigest != nil { + instanceDigest = &manifestDigest + } + if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil { + logrus.Debugf("Error %v while writing manifest %q", err, string(man)) + return nil, "", fmt.Errorf("writing manifest: %w", err) + } + return man, manifestDigest, nil +} + +// copyConfig copies config.json, if any, from src to dest. +func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error { + srcInfo := src.ConfigInfo() + if srcInfo.Digest != "" { + if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil { + // This can only fail with ctx.Err(), so no need to blame acquiring the semaphore. + return fmt.Errorf("copying config: %w", err) + } + defer ic.c.concurrentBlobCopiesSemaphore.Release(1) + + destInfo, err := func() (types.BlobInfo, error) { // A scope for defer + progressPool := ic.c.newProgressPool() + defer progressPool.Wait() + bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done") + defer bar.Abort(false) + ic.c.printCopyInfo("config", srcInfo) + + configBlob, err := src.ConfigBlob(ctx) + if err != nil { + return types.BlobInfo{}, fmt.Errorf("reading config blob %s: %w", srcInfo.Digest, err) + } + + destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false) + if err != nil { + return types.BlobInfo{}, err + } + + bar.mark100PercentComplete() + return destInfo, nil + }() + if err != nil { + return err + } + if destInfo.Digest != srcInfo.Digest { + return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest) + } + } + return nil +} + +// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine. +// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation. 
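The closing check in copyConfig above encodes an invariant worth spelling out: the config travels uncompressed, so whatever lands at the destination must hash back to the digest the manifest already names. In miniature:

```go
package configsketch

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// verifyUnchanged re-derives a blob's canonical digest; for an uncompressed
// config blob, a mismatch against the digest named by the manifest means the
// bytes were altered somewhere in transit.
func verifyUnchanged(blob []byte, want digest.Digest) error {
	if got := digest.Canonical.FromBytes(blob); got != want {
		return fmt.Errorf("config blob digest changed: expected %s, got %s", want, got)
	}
	return nil
}
```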
+type diffIDResult struct {
+ digest digest.Digest
+ err error
+}
+
+// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
+// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded.
+// srcRef can be used as an additional hint to the destination when checking whether a layer can be reused; srcRef may be nil.
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) {
+ // If the srcInfo doesn't contain compression information, try to compute it from the
+ // MediaType, which was either read from a manifest by way of LayerInfos() or constructed
+ // by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob,
+ // the BlobInfo we return will be passed to UpdatedImage() and then to UpdateLayerInfos(),
+ // which uses the compression information to compute the updated MediaType values.
+ // (Sadly UpdatedImage() is documented to not update MediaTypes from
+ // ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
+ //
+ // This MIME type → compression mapping belongs in manifest-specific code in our manifest
+ // package (but we should preferably replace/change UpdatedImage instead of productizing
+ // this workaround).
+ if srcInfo.CompressionAlgorithm == nil {
+ switch srcInfo.MediaType {
+ case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
+ srcInfo.CompressionAlgorithm = &compression.Gzip
+ case imgspecv1.MediaTypeImageLayerZstd:
+ srcInfo.CompressionAlgorithm = &compression.Zstd
+ }
+ }
+
+ ic.c.printCopyInfo("blob", srcInfo)
+
+ cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+ diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
+ // When encrypting or decrypting, only use the simple code path. We might be able to optimize more
+ // (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
+ // but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
+ encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
+ canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
+
+ // Don’t read the layer from the source if we already have the blob, and optimizations are acceptable.
+ if canAvoidProcessingCompleteLayer {
+ canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
+ logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
+ srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
+ canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType)
+ // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
+ // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
+ // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
+ // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
+ // Fixing that will probably require passing more information to TryReusingBlob() than the current version of
+ // the ImageDestination interface lets us pass in.
+ reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
+ Cache: ic.c.blobInfoCache,
+ CanSubstitute: canSubstitute,
+ EmptyLayer: emptyLayer,
+ LayerIndex: &layerIndex,
+ SrcRef: srcRef,
+ })
+ if err != nil {
+ return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
+ }
+ if reused {
+ logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
+ func() { // A scope for defer
+ bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", "skipped: already exists")
+ defer bar.Abort(false)
+ bar.mark100PercentComplete()
+ }()
+
+ // Throw an event that the layer has been skipped
+ if ic.c.progress != nil && ic.c.progressInterval > 0 {
+ ic.c.progress <- types.ProgressProperties{
+ Event: types.ProgressEventSkipped,
+ Artifact: srcInfo,
+ }
+ }
+
+ return updatedBlobInfoFromReuse(srcInfo, reusedBlob), cachedDiffID, nil
+ }
+ }
+
+ // A partial pull is managed by the destination storage, which decides what portions
+ // of the source file are not known yet and must be fetched.
+ // Attempt a partial pull only when the source supports retrieving a blob partially and
+ // the destination has support for it.
+ if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
+ if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
+ bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
+ hideProgressBar := true
+ defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
+ bar.Abort(hideProgressBar) + }() + + proxy := blobChunkAccessorProxy{ + wrapped: ic.c.rawSource, + bar: bar, + } + uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache) + if err == nil { + if srcInfo.Size != -1 { + bar.SetRefill(srcInfo.Size - bar.Current()) + } + bar.mark100PercentComplete() + hideProgressBar = false + logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest) + return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob) + } + logrus.Debugf("Failed to retrieve partial blob: %v", err) + return false, types.BlobInfo{} + }(); reused { + return blobInfo, cachedDiffID, nil + } + } + + // Fallback: copy the layer, computing the diffID if we need to do so + return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer + bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done") + defer bar.Abort(false) + + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache) + if err != nil { + return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err) + } + defer srcStream.Close() + + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer) + if err != nil { + return types.BlobInfo{}, "", err + } + + diffID := cachedDiffID + if diffIDIsNeeded { + select { + case <-ctx.Done(): + return types.BlobInfo{}, "", ctx.Err() + case diffIDResult := <-diffIDChan: + if diffIDResult.err != nil { + return types.BlobInfo{}, "", fmt.Errorf("computing layer DiffID: %w", diffIDResult.err) + } + logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) + // Don’t record any associations that involve encrypted data. This is a bit crude, + // some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes) + // might be safe, but it’s not trivially obvious, so let’s be conservative for now. + // This crude approach also means we don’t need to record whether a blob is encrypted + // in the blob info cache (which would probably be necessary for any more complex logic), + // and the simplicity is attractive. + if !encryptingOrDecrypting { + // This is safe because we have just computed diffIDResult.Digest ourselves, and in the process + // we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader. + ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest) + } + diffID = diffIDResult.digest + } + } + + bar.mark100PercentComplete() + return blobInfo, diffID, nil + }() +} + +// updatedBlobInfoFromReuse returns inputInfo updated with reusedBlob which was created based on inputInfo. +func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.ReusedBlob) types.BlobInfo { + // The transport is only tasked with finding the blob, determining its size if necessary, and returning the right + // compression format if the blob was substituted. + // Handling of compression, encryption, and the related MIME types and the like are all the responsibility + // of the generic code in this package. + res := types.BlobInfo{ + Digest: reusedBlob.Digest, + Size: reusedBlob.Size, + URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. 
+ Annotations: inputInfo.Annotations, // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls)
+ MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
+ CompressionOperation: reusedBlob.CompressionOperation,
+ CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
+ CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset anyway.
+ }
+ // The transport is only expected to fill CompressionOperation and CompressionAlgorithm
+ // if the blob was substituted; otherwise, fill it in based
+ // on what we know from the srcInfos we were given.
+ if reusedBlob.Digest == inputInfo.Digest {
+ res.CompressionOperation = inputInfo.CompressionOperation
+ res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
+ }
+ return res
+}
+
+// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
+// It copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
+// perhaps (de/re/)compressing the stream,
+// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
+func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
+ diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
+ var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
+ var diffIDChan chan diffIDResult
+
+ err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError(err) below
+ if diffIDIsNeeded {
+ diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
+ pipeReader, pipeWriter := io.Pipe()
+ defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
+ _ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
+ }()
+
+ getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer {
+ // If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
+ // reading from the pipe has failed, we don’t really care.
+ // We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
+ // the return value includes an error indication, which we do check.
+ //
+ // If this is never called, pipeReader will not be used anywhere, but pipeWriter will only be
+ // closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
+ go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
+ return pipeWriter
+ }
+ }
+
+ blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
+ return blobInfo, diffIDChan, err
+ // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
+}
+
+// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends the resulting digest and error status, if any, to dest.
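A simplified sketch of the pipe wiring described above, assuming it sits in this package so it can reuse the diffIDResult type: the layer stream is teed into an io.Pipe, a goroutine digests whatever arrives, and the buffered channel guarantees the send can never block a caller that has already failed and returned.

```go
// digestInBackground tees layer into a pipe and digests the teed bytes in a
// goroutine (the real code also threads a decompressor through this path).
func digestInBackground(layer io.Reader) (tee io.Reader, closeTee func(error), results <-chan diffIDResult) {
	ch := make(chan diffIDResult, 1) // buffered: the send below never blocks
	pr, pw := io.Pipe()
	go func() {
		d, err := digest.Canonical.FromReader(pr) // returns once closeTee signals EOF or an error
		pr.Close()
		ch <- diffIDResult{digest: d, err: err}
	}()
	// The caller copies from tee, then must invoke closeTee with the final
	// error (nil on success) so the digesting goroutine terminates.
	return io.TeeReader(layer, pw), func(err error) { _ = pw.CloseWithError(err) }, ch
}
```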
+func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) { + result := diffIDResult{ + digest: "", + err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"), + } + defer func() { dest <- result }() + defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead. + + result.digest, result.err = computeDiffID(layerStream, decompressor) +} + +// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest. +func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) { + if decompressor != nil { + s, err := decompressor(stream) + if err != nil { + return "", err + } + defer s.Close() + stream = s + } + + return digest.Canonical.FromReader(stream) +} diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index 55b29fe17..974d23d5f 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -132,11 +132,11 @@ func (d *dirImageDestination) Close() error { // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob") if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } succeeded := false explicitClosed := false @@ -153,14 +153,14 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io. // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). size, err := io.Copy(blobFile, stream) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } blobDigest := digester.Digest() if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) + return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) } if err := blobFile.Sync(); err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable. @@ -169,7 +169,7 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io. // always fails on Windows. 
if runtime.GOOS != "windows" { if err := blobFile.Chmod(0644); err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } } @@ -178,32 +178,30 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io. blobFile.Close() explicitClosed = true if err := os.Rename(blobFile.Name(), blobPath); err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } succeeded = true - return types.BlobInfo{Digest: blobDigest, Size: size}, nil + return private.UploadedBlob{Digest: blobDigest, Size: size}, nil } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may -// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be -// reflected in the manifest that will be written. +// If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { +func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { if info.Digest == "" { - return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with unknown digest") + return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest") } blobPath := d.ref.layerPath(info.Digest) finfo, err := os.Stat(blobPath) if err != nil && os.IsNotExist(err) { - return false, types.BlobInfo{}, nil + return false, private.ReusedBlob{}, nil } if err != nil { - return false, types.BlobInfo{}, err + return false, private.ReusedBlob{}, err } - return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil + return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil } // PutManifest writes manifest to the destination. 
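The directory destination's PutBlobWithOptions above follows a classic write-then-rename discipline: stream into a temp file next to the final location, digest on the way through, fsync, then rename, so a crash never leaves a partial blob under its final name. The shape of it, with assumed names:

```go
package dirsketch

import (
	"io"
	"os"
	"path/filepath"

	digest "github.com/opencontainers/go-digest"
)

// writeBlobAtomically mirrors the pattern: temp file, digest in passing,
// sync, then an atomic rename into place.
func writeBlobAtomically(dir, finalName string, stream io.Reader) (digest.Digest, int64, error) {
	tmp, err := os.CreateTemp(dir, "dir-put-blob")
	if err != nil {
		return "", -1, err
	}
	defer os.Remove(tmp.Name()) // Best-effort cleanup; a no-op once the rename succeeds.

	digester := digest.Canonical.Digester()
	size, err := io.Copy(tmp, io.TeeReader(stream, digester.Hash()))
	if err != nil {
		tmp.Close()
		return "", -1, err
	}
	if err := tmp.Sync(); err != nil { // Make the bytes durable before the rename publishes them.
		tmp.Close()
		return "", -1, err
	}
	if err := tmp.Close(); err != nil {
		return "", -1, err
	}
	if err := os.Rename(tmp.Name(), filepath.Join(dir, finalName)); err != nil {
		return "", -1, err
	}
	return digester.Digest(), size, nil
}
```

The rename is what makes the temp-file dance worthwhile: on POSIX filesystems it atomically replaces the destination path, so readers see either no blob or the whole blob.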
diff --git a/vendor/github.com/containers/image/v5/directory/directory_src.go b/vendor/github.com/containers/image/v5/directory/directory_src.go index 98efdedd7..5fc83bb6f 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_src.go +++ b/vendor/github.com/containers/image/v5/directory/directory_src.go @@ -8,9 +8,9 @@ import ( "github.com/containers/image/v5/internal/imagesource/impl" "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/signature" - "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" ) diff --git a/vendor/github.com/containers/image/v5/directory/directory_transport.go b/vendor/github.com/containers/image/v5/directory/directory_transport.go index 253ecb247..7e3068693 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_transport.go +++ b/vendor/github.com/containers/image/v5/directory/directory_transport.go @@ -89,7 +89,7 @@ func (ref dirReference) Transport() types.ImageTransport { // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. func (ref dirReference) StringWithinTransport() string { return ref.path diff --git a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go index b4ff4d08a..32ae1ae8a 100644 --- a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go +++ b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go @@ -9,7 +9,7 @@ import ( // ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path. // To do so, all elements of the input path must exist; as a special case, the final component may be // a non-existent name (but not a symlink pointing to a non-existent name) -// This is intended as a a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc. +// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc. func ResolvePathToFullyExplicit(path string) (string, error) { switch _, err := os.Lstat(path); { case err == nil: diff --git a/vendor/github.com/containers/image/v5/docker/archive/src.go b/vendor/github.com/containers/image/v5/docker/archive/src.go index 5604a5121..c4ab9a8c1 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/src.go +++ b/vendor/github.com/containers/image/v5/docker/archive/src.go @@ -1,8 +1,6 @@ package archive import ( - "context" - "github.com/containers/image/v5/docker/internal/tarfile" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/types" @@ -15,7 +13,7 @@ type archiveImageSource struct { // newImageSource returns a types.ImageSource for the specified image reference. 
// The caller must call .Close() on the returned ImageSource.
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) {
+func newImageSource(sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) {
 var archive *tarfile.Reader
 var closeArchive bool
 if ref.archiveReader != nil {
diff --git a/vendor/github.com/containers/image/v5/docker/archive/transport.go b/vendor/github.com/containers/image/v5/docker/archive/transport.go
index 304a8c618..39e92cac3 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/transport.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/transport.go
@@ -62,24 +62,23 @@ func ParseReference(refString string) (types.ImageReference, error) {
 return nil, fmt.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
 }
- parts := strings.SplitN(refString, ":", 2)
- path := parts[0]
+ path, tagOrIndex, gotTagOrIndex := strings.Cut(refString, ":")
 var nt reference.NamedTagged
 sourceIndex := -1
- if len(parts) == 2 {
+ if gotTagOrIndex {
 // A :tag or :@index was specified.
- if len(parts[1]) > 0 && parts[1][0] == '@' {
- i, err := strconv.Atoi(parts[1][1:])
+ if len(tagOrIndex) > 0 && tagOrIndex[0] == '@' {
+ i, err := strconv.Atoi(tagOrIndex[1:])
 if err != nil {
- return nil, fmt.Errorf("Invalid source index %s: %w", parts[1], err)
+ return nil, fmt.Errorf("Invalid source index %s: %w", tagOrIndex, err)
 }
 if i < 0 {
 return nil, fmt.Errorf("Invalid source index @%d: must not be negative", i)
 }
 sourceIndex = i
 } else {
- ref, err := reference.ParseNormalizedNamed(parts[1])
+ ref, err := reference.ParseNormalizedNamed(tagOrIndex)
 if err != nil {
 return nil, fmt.Errorf("docker-archive parsing reference: %w", err)
 }
@@ -137,7 +136,7 @@ func (ref archiveReference) Transport() types.ImageTransport {
 // StringWithinTransport returns a string representation of the reference, which MUST be such that
 // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
 // NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
 // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
 func (ref archiveReference) StringWithinTransport() string {
 switch {
@@ -191,7 +190,7 @@ func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemConte
 // NewImageSource returns a types.ImageSource for this reference.
 // The caller must call .Close() on the returned ImageSource.
 func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
- return newImageSource(ctx, sys, ref)
+ return newImageSource(sys, ref)
 }
 // NewImageDestination returns a types.ImageDestination for this reference.
diff --git a/vendor/github.com/containers/image/v5/docker/archive/writer.go b/vendor/github.com/containers/image/v5/docker/archive/writer.go
index 315c282ca..11f797c00 100644
--- a/vendor/github.com/containers/image/v5/docker/archive/writer.go
+++ b/vendor/github.com/containers/image/v5/docker/archive/writer.go
@@ -19,7 +19,7 @@ type Writer struct {
 archive *tarfile.Writer
 writer io.Closer
- // The following state can only be acccessed with the mutex held.
+ // The following state can only be accessed with the mutex held. mutex sync.Mutex hadCommit bool // At least one successful commit has happened } diff --git a/vendor/github.com/containers/image/v5/docker/body_reader.go b/vendor/github.com/containers/image/v5/docker/body_reader.go index 1bc8c35fb..7d66ef6bc 100644 --- a/vendor/github.com/containers/image/v5/docker/body_reader.go +++ b/vendor/github.com/containers/image/v5/docker/body_reader.go @@ -41,7 +41,7 @@ type bodyReader struct { } // newBodyReader creates a bodyReader for request path in c. -// firstBody is an already correctly opened body for the blob, returing the full blob from the start. +// firstBody is an already correctly opened body for the blob, returning the full blob from the start. // If reading from firstBody fails, bodyReader may heuristically decide to resume. func newBodyReader(ctx context.Context, c *dockerClient, path string, firstBody io.ReadCloser) (io.ReadCloser, error) { logURL, err := c.resolveRequestURL(path) @@ -193,7 +193,7 @@ func (br *bodyReader) Read(p []byte) (int, error) { return n, fmt.Errorf("%w (after reconnecting, fetching blob: %v)", originalErr, err) } - logrus.Debugf("Succesfully reconnected to %s", redactedURL) + logrus.Debugf("Successfully reconnected to %s", redactedURL) consumedBody = true br.body = res.Body br.lastRetryOffset = br.offset diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go index dc4aa70d3..59e02462f 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go @@ -69,6 +69,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem // imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) { + defer c.Close() err := errors.New("Internal error: unexpected panic in imageLoadGoroutine") defer func() { logrus.Debugf("docker-daemon: sending done, status %v", err) diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go index b57936654..10923c278 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go @@ -28,6 +28,8 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef if err != nil { return nil, fmt.Errorf("initializing docker engine client: %w", err) } + defer c.Close() + // Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference. // Either way ImageSave should create a tarball with exactly one image. 
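The daemon hunks above add the Close calls the client was missing. In caller terms the lifecycle is: create a client, defer Close, stream the tarball, close the stream. A sketch against the Docker client API (the image name and destination path are hypothetical):

```go
package daemonsketch

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/client"
)

// saveImage streams one image out of the local Docker daemon into a tar file.
func saveImage(ctx context.Context, name, destPath string) error {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		return err
	}
	defer cli.Close() // The counterpart of the defer added in these hunks.

	rc, err := cli.ImageSave(ctx, []string{name}) // One name, one image in the tarball.
	if err != nil {
		return err
	}
	defer rc.Close()

	f, err := os.Create(destPath)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, rc)
	return err
}
```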
inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()}) diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go index ad2189084..9eedff745 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go @@ -118,7 +118,7 @@ func (ref daemonReference) Transport() types.ImageTransport { // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; // instead, see transports.ImageName(). func (ref daemonReference) StringWithinTransport() string { diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 9e8fece03..570cca483 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -213,6 +213,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { // newDockerClientFromRef returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry) // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) // signatureBase is always set in the return value +// The caller must call .Close() on the returned client when done. func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, registryConfig *registryConfiguration, write bool, actions string) (*dockerClient, error) { auth, err := config.GetCredentialsForRef(sys, ref.ref) if err != nil { @@ -247,6 +248,7 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, regis // (e.g., "registry.com[:5000][/some/namespace]/repo"). // Please note that newDockerClient does not set all members of dockerClient // (e.g., username and password); those must be set by callers if necessary. +// The caller must call .Close() on the returned client when done. 
func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) { hostName := registry if registry == dockerHostname { @@ -302,6 +304,7 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password if err != nil { return fmt.Errorf("creating new docker client: %w", err) } + defer client.Close() client.auth = types.DockerAuthConfig{ Username: username, Password: password, @@ -371,6 +374,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima if err != nil { return nil, fmt.Errorf("creating new docker client: %w", err) } + defer client.Close() client.auth = auth if sys != nil { client.registryToken = sys.DockerBearerRegistryToken @@ -448,8 +452,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima if link == "" { break } - linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") - linkURL, err := url.Parse(linkURLStr) + linkURLPart, _, _ := strings.Cut(link, ";") + linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>")) if err != nil { return searchRes, err } @@ -596,7 +600,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri case <-time.After(delay): // Nothing } - delay = delay * 2 // If the registry does not specify a delay, back off exponentially. + delay *= 2 // If the registry does not specify a delay, back off exponentially. } } @@ -985,7 +989,7 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty return reconnectingReader, blobSize, nil } -// getOCIDescriptorContents returns the contents a blob spcified by descriptor in ref, which must fit within limit. +// getOCIDescriptorContents returns the contents a blob specified by descriptor in ref, which must fit within limit. func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) { // Note that this copies all kinds of attachments: attestations, and whatever else is there, // not just signatures. We leave the signature consumers to decide based on the MIME type. @@ -994,7 +998,7 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR return nil, err } defer reader.Close() - payload, err := iolimits.ReadAtMost(reader, iolimits.MaxSignatureBodySize) + payload, err := iolimits.ReadAtMost(reader, maxSize) if err != nil { return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err) } @@ -1084,3 +1088,11 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe func sigstoreAttachmentTag(d digest.Digest) string { return strings.Replace(d.String(), ":", "-", 1) + ".sig" } + +// Close removes resources associated with an initialized dockerClient, if any. +func (c *dockerClient) Close() error { + if c.client != nil { + c.client.CloseIdleConnections() + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go index 6a4331e33..42bbfd95e 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image.go @@ -68,6 +68,7 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types. 
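The new dockerClient.Close() above has exactly one resource to release: the idle connections pooled inside its HTTP client. That is why the "caller must call .Close()" contract documented on both constructors can be retrofitted with one-line defer client.Close() additions at each call site. A reduced sketch of the same shape:

    package main

    import (
        "fmt"
        "net/http"
    )

    // apiClient mirrors the shape of dockerClient: the http.Client keeps a pool
    // of idle keep-alive connections that outlive individual requests.
    type apiClient struct {
        client *http.Client
    }

    // Close drops the pooled idle connections; the nil check makes Close safe
    // on a client whose HTTP transport was never initialized.
    func (c *apiClient) Close() error {
        if c.client != nil {
            c.client.CloseIdleConnections()
        }
        return nil
    }

    func main() {
        c := &apiClient{client: &http.Client{}}
        defer c.Close()
        fmt.Println("client ready")
    }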
if err != nil { return nil, fmt.Errorf("failed to create client: %w", err) } + defer client.Close() tags := make([]string, 0) @@ -94,8 +95,8 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types. break } - linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") - linkURL, err := url.Parse(linkURLStr) + linkURLPart, _, _ := strings.Cut(link, ";") + linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>")) if err != nil { return tags, err } @@ -136,6 +137,7 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef if err != nil { return "", fmt.Errorf("failed to create client: %w", err) } + defer client.Close() path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest) headers := map[string][]string{ diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 7a7f72d9a..44e2aea23 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -21,6 +21,7 @@ import ( "github.com/containers/image/v5/internal/iolimits" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/internal/streamdigest" "github.com/containers/image/v5/internal/uploadreader" @@ -32,6 +33,8 @@ import ( "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" ) type dockerImageDestination struct { @@ -90,7 +93,7 @@ func (d *dockerImageDestination) Reference() types.ImageReference { // Close removes resources associated with an initialized ImageDestination, if any. func (d *dockerImageDestination) Close() error { - return nil + return d.c.Close() } // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. @@ -129,8 +132,8 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) { // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { // If requested, precompute the blob digest to prevent uploading layers that already exist on the registry. // This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests, // the source blob is uncompressed, and the destination blob is being compressed "on the fly". 
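The pagination loops in SearchRegistry and GetRepositoryTags above now isolate the Link header's URL with strings.Cut instead of strings.Split, since only the portion before the first ";" is wanted. That extraction in isolation (the header value below is illustrative):

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    // nextLinkURL pulls the target out of an RFC 5988 Link header value such as
    // `</v2/foo/tags/list?n=100&last=bar>; rel="next"`, using the same
    // Cut-then-Trim sequence the patch substitutes for strings.Split(link, ";")[0].
    func nextLinkURL(link string) (*url.URL, error) {
        linkURLPart, _, _ := strings.Cut(link, ";")
        return url.Parse(strings.Trim(linkURLPart, "<>"))
    }

    func main() {
        u, err := nextLinkURL(`</v2/foo/tags/list?n=100&last=bar>; rel="next"`)
        fmt.Println(u, err)
    }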
@@ -138,7 +141,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref)) streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } defer cleanup() stream = streamCopy @@ -149,10 +152,10 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, options.Cache) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } if haveBlob { - return reusedInfo, nil + return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil } } @@ -161,16 +164,16 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream logrus.Debugf("Uploading %s", uploadPath) res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } defer res.Body.Close() if res.StatusCode != http.StatusAccepted { logrus.Debugf("Error initiating layer upload, response %#v", *res) - return types.BlobInfo{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res)) + return private.UploadedBlob{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res)) } uploadLocation, err := res.Location() if err != nil { - return types.BlobInfo{}, fmt.Errorf("determining upload URL: %w", err) + return private.UploadedBlob{}, fmt.Errorf("determining upload URL: %w", err) } digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) @@ -198,7 +201,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream return uploadLocation, nil }() if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } blobDigest := digester.Digest() @@ -209,17 +212,17 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream uploadLocation.RawQuery = locationQuery.Encode() res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } defer res.Body.Close() if res.StatusCode != http.StatusCreated { logrus.Debugf("Error uploading layer, response %#v", *res) - return types.BlobInfo{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res)) + return private.UploadedBlob{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res)) } logrus.Debugf("Upload of layer %s complete", blobDigest) options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref)) - return types.BlobInfo{Digest: blobDigest, Size: sizeCounter.size}, nil + return private.UploadedBlob{Digest: blobDigest, Size: sizeCounter.size}, nil } // blobExists returns true iff repo contains a blob with digest, and if so, also its size. 
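In the PutBlobWithOptions flow above, the upload stream is threaded through a digester and the sizeCounter, so a single pass over the data yields both fields of the new private.UploadedBlob result. A minimal sketch of that one-pass accounting, using crypto/sha256 directly rather than the go-digest wrapper the vendored code uses:

    package main

    import (
        "crypto/sha256"
        "fmt"
        "io"
        "strings"
    )

    // sizeCounter matches the helper in docker_image_dest.go: it counts bytes
    // as they stream past.
    type sizeCounter struct{ size int64 }

    func (c *sizeCounter) Write(p []byte) (int, error) {
        c.size += int64(len(p))
        return len(p), nil
    }

    func main() {
        stream := strings.NewReader("layer bytes")
        h := sha256.New()
        counter := &sizeCounter{}
        // One io.Copy feeds the hash and the counter simultaneously, so the
        // blob is read exactly once even though two results are produced.
        if _, err := io.Copy(io.MultiWriter(h, counter), stream); err != nil {
            panic(err)
        }
        fmt.Printf("sha256:%x size=%d\n", h.Sum(nil), counter.size)
    }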
@@ -296,34 +299,32 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc // tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified // blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read. // The caller must ensure info.Digest is set. -func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, types.BlobInfo, error) { +func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, private.ReusedBlob, error) { exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil) if err != nil { - return false, types.BlobInfo{}, err + return false, private.ReusedBlob{}, err } if exists { cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref)) - return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil + return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil } - return false, types.BlobInfo{}, nil + return false, private.ReusedBlob{}, nil } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may -// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be -// reflected in the manifest that will be written. +// If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { +func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { if info.Digest == "" { - return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest") + return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest") } // First, check whether the blob happens to already exist at the destination. 
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache) if err != nil { - return false, types.BlobInfo{}, err + return false, private.ReusedBlob{}, err } if haveBlob { return true, reusedInfo, nil @@ -393,10 +394,14 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, continue } - return true, types.BlobInfo{Digest: candidate.Digest, MediaType: info.MediaType, Size: size, CompressionOperation: compressionOperation, CompressionAlgorithm: compressionAlgorithm}, nil + return true, private.ReusedBlob{ + Digest: candidate.Digest, + Size: size, + CompressionOperation: compressionOperation, + CompressionAlgorithm: compressionAlgorithm}, nil } - return false, types.BlobInfo{}, nil + return false, private.ReusedBlob{}, nil } // PutManifest writes manifest to the destination. @@ -731,24 +736,15 @@ func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string, // But right now we don’t want to deal with corner cases like bad digest formats // or unavailable algorithms; in the worst case we end up with duplicate signature // entries. - layer.Digest.String() != digest.FromBytes(payloadBlob).String() { - return false - } - if len(layer.Annotations) != len(annotations) { + layer.Digest.String() != digest.FromBytes(payloadBlob).String() || + !maps.Equal(layer.Annotations, annotations) { return false } - for k, v1 := range layer.Annotations { - if v2, ok := annotations[k]; !ok || v1 != v2 { - return false - } - } - // All annotations in layer exist in sig, and the number of annotations is the same, so all annotations - // in sig also exist in layer. return true } // putBlobBytesAsOCI uploads a blob with the specified contents, and returns an appropriate -// OCI descriptior. +// OCI descriptor. func (d *dockerImageDestination) putBlobBytesAsOCI(ctx context.Context, contents []byte, mimeType string, options private.PutBlobOptions) (imgspecv1.Descriptor, error) { blobDigest := digest.FromBytes(contents) info, err := d.PutBlobWithOptions(ctx, bytes.NewReader(contents), @@ -803,12 +799,11 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context if err != nil { return err } - existingSigNames := map[string]struct{}{} + existingSigNames := set.New[string]() for _, sig := range existingSignatures.Signatures { - existingSigNames[sig.Name] = struct{}{} + existingSigNames.Add(sig.Name) } -sigExists: for _, newSigWithFormat := range signatures { newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning) if !ok { @@ -816,10 +811,10 @@ sigExists: } newSig := newSigSimple.UntrustedSignature() - for _, existingSig := range existingSignatures.Signatures { - if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) { - continue sigExists - } + if slices.ContainsFunc(existingSignatures.Signatures, func(existingSig extensionSignature) bool { + return existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) + }) { + continue } // The API expect us to invent a new unique name. This is racy, but hopefully good enough. 
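The signature-handling rewrites above lean on the golang.org/x/exp generics this vendor bump pulls in: maps.Equal collapses the hand-written annotation comparison (length check plus per-key loop), and slices.ContainsFunc replaces the labeled sigExists loop with a predicate. Both in isolation:

    package main

    import (
        "bytes"
        "fmt"

        "golang.org/x/exp/maps"
        "golang.org/x/exp/slices"
    )

    func main() {
        // maps.Equal is true iff both maps have the same key set and equal
        // values, exactly what the removed manual loop verified.
        a := map[string]string{"org.example/key": "v"}
        b := map[string]string{"org.example/key": "v"}
        fmt.Println(maps.Equal(a, b)) // true

        // slices.ContainsFunc reports whether any element satisfies the
        // predicate, replacing the "continue sigExists" labeled-loop idiom.
        sigs := [][]byte{[]byte("sig1"), []byte("sig2")}
        newSig := []byte("sig2")
        fmt.Println(slices.ContainsFunc(sigs, func(s []byte) bool {
            return bytes.Equal(s, newSig)
        })) // true
    }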
@@ -831,7 +826,7 @@ sigExists: return fmt.Errorf("generating random signature len %d: %w", n, err) } signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes) - if _, ok := existingSigNames[signatureName]; !ok { + if !existingSigNames.Contains(signatureName) { break } } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 373fb259c..231d5d212 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -153,6 +153,7 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logica s.Compat = impl.AddCompat(s) if err := s.ensureManifestIsLoaded(ctx); err != nil { + client.Close() return nil, err } return s, nil @@ -166,7 +167,7 @@ func (s *dockerImageSource) Reference() types.ImageReference { // Close removes resources associated with an initialized ImageSource, if any. func (s *dockerImageSource) Close() error { - return nil + return s.c.Close() } // simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1) @@ -250,7 +251,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, currentOffset += toSkip } s := signalCloseReader{ - closed: make(chan interface{}), + closed: make(chan struct{}), stream: io.NopCloser(io.LimitReader(body, int64(c.Length))), consumeStream: true, } @@ -292,7 +293,7 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read return } s := signalCloseReader{ - closed: make(chan interface{}), + closed: make(chan struct{}), stream: p, } streams <- s @@ -335,7 +336,7 @@ func parseMediaType(contentType string) (string, map[string]string, error) { func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { headers := make(map[string][]string) - var rangeVals []string + rangeVals := make([]string, 0, len(chunks)) for _, c := range chunks { rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1)) } @@ -605,6 +606,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere if err != nil { return err } + defer c.Close() headers := map[string][]string{ "Accept": manifest.DefaultRequestedManifestMIMETypes, @@ -766,7 +768,7 @@ func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint) } type signalCloseReader struct { - closed chan interface{} + closed chan struct{} stream io.ReadCloser consumeStream bool } diff --git a/vendor/github.com/containers/image/v5/docker/docker_transport.go b/vendor/github.com/containers/image/v5/docker/docker_transport.go index 0544bb3c9..6ae849159 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_transport.go +++ b/vendor/github.com/containers/image/v5/docker/docker_transport.go @@ -92,7 +92,7 @@ func (ref dockerReference) Transport() types.ImageTransport { // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. 
default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. func (ref dockerReference) StringWithinTransport() string { return "//" + reference.FamiliarString(ref.ref) diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go index 74fe17648..2caa10d7d 100644 --- a/vendor/github.com/containers/image/v5/docker/errors.go +++ b/vendor/github.com/containers/image/v5/docker/errors.go @@ -40,7 +40,7 @@ func httpResponseToError(res *http.Response, context string) error { return ErrUnauthorizedForCredentials{Err: err} default: if context != "" { - context = context + ": " + context += ": " } return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode)) } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go index 9a0ea683e..00e25748b 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go @@ -76,15 +76,15 @@ func (d *Destination) AddRepoTags(tags []reference.NamedTagged) { // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { // Ouch, we need to stream the blob into a temporary file just to determine the size. // When the layer is decompressed, we also have to generate the digest on uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest == "" { logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...") streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } defer cleanup() stream = streamCopy @@ -92,47 +92,45 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, } if err := d.archive.lock(); err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } defer d.archive.unlock() // Maybe the blob has been already sent ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } if ok { - return reusedInfo, nil + return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil } if options.IsConfig { buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) if err != nil { - return types.BlobInfo{}, fmt.Errorf("reading Config file stream: %w", err) + return private.UploadedBlob{}, fmt.Errorf("reading Config file stream: %w", err) } d.config = buf if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil { - return types.BlobInfo{}, fmt.Errorf("writing Config file: %w", err) + return private.UploadedBlob{}, fmt.Errorf("writing Config file: %w", err) } } else { if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } } d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}) - return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil + return private.UploadedBlob{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may -// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be -// reflected in the manifest that will be written. +// If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
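The "stream to disk first" branch above exists because the tar writer needs an exact size for the entry header and a digest to name the file inside the archive, neither of which is known for a fresh stream. A reduced sketch of that buffering step, with plain sha256 standing in for the streamdigest helper:

    package main

    import (
        "crypto/sha256"
        "fmt"
        "io"
        "os"
        "strings"
    )

    // bufferBlob spools a blob of unknown size to a temporary file, computing
    // the digest on the same pass, then rewinds the file so it can be consumed
    // again.
    func bufferBlob(stream io.Reader) (*os.File, string, int64, error) {
        f, err := os.CreateTemp("", "blob-")
        if err != nil {
            return nil, "", -1, err
        }
        h := sha256.New()
        size, err := io.Copy(io.MultiWriter(f, h), stream)
        if err == nil {
            _, err = f.Seek(0, io.SeekStart) // rewind for the real consumer
        }
        if err != nil {
            f.Close()
            os.Remove(f.Name())
            return nil, "", -1, err
        }
        return f, fmt.Sprintf("sha256:%x", h.Sum(nil)), size, nil
    }

    func main() {
        f, d, n, err := bufferBlob(strings.NewReader("blob data"))
        if err != nil {
            panic(err)
        }
        defer os.Remove(f.Name())
        defer f.Close()
        fmt.Println(d, n)
    }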
-func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { +func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { if err := d.archive.lock(); err != nil { - return false, types.BlobInfo{}, err + return false, private.ReusedBlob{}, err } defer d.archive.unlock() diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go index 44e1004a6..df7b2c090 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go @@ -13,10 +13,13 @@ import ( "time" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // Writer allows creating a (docker save)-formatted tar archive containing one or more images. @@ -29,7 +32,7 @@ type Writer struct { // Other state. blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs repositories map[string]map[string]string - legacyLayers map[string]struct{} // A set of IDs of legacy layers that have been already sent. + legacyLayers *set.Set[string] // A set of IDs of legacy layers that have been already sent. manifest []ManifestItem manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above. } @@ -42,7 +45,7 @@ func NewWriter(dest io.Writer) *Writer { tar: tar.NewWriter(dest), blobs: make(map[digest.Digest]types.BlobInfo), repositories: map[string]map[string]string{}, - legacyLayers: map[string]struct{}{}, + legacyLayers: set.New[string](), manifestByConfig: map[digest.Digest]int{}, } } @@ -67,17 +70,17 @@ func (w *Writer) unlock() { // tryReusingBlobLocked checks whether the transport already contains, a blob, and if so, returns its metadata. // info.Digest must not be empty. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // The caller must have locked the Writer. -func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) { +func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, private.ReusedBlob, error) { if info.Digest == "" { - return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest") + return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest") } if blob, ok := w.blobs[info.Digest]; ok { - return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil + return true, private.ReusedBlob{Digest: info.Digest, Size: blob.Size}, nil } - return false, types.BlobInfo{}, nil + return false, private.ReusedBlob{}, nil } // recordBlob records metadata of a recorded blob, which must contain at least a digest and size. 
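internal/set, used above for legacyLayers and knownRepoTags, gives the ubiquitous map[string]struct{} idiom a name and methods. The real package has a larger API; a minimal equivalent of the three operations this file uses (an assumed shape, not the vendored implementation):

    package main

    import "fmt"

    // Set is a minimal generic set; Contains and Add replace the two-result
    // map lookup and the struct{}{} insertion of the old code.
    type Set[E comparable] struct {
        m map[E]struct{}
    }

    func New[E comparable]() *Set[E] {
        return &Set[E]{m: map[E]struct{}{}}
    }

    func (s *Set[E]) Add(v E) {
        s.m[v] = struct{}{}
    }

    func (s *Set[E]) Contains(v E) bool {
        _, ok := s.m[v]
        return ok
    }

    func main() {
        legacyLayers := New[string]()
        legacyLayers.Add("layer-1")
        fmt.Println(legacyLayers.Contains("layer-1"), legacyLayers.Contains("layer-2"))
    }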
@@ -89,7 +92,7 @@ func (w *Writer) recordBlobLocked(info types.BlobInfo) { // ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer // The caller must have locked the Writer. func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error { - if _, ok := w.legacyLayers[layerID]; !ok { + if !w.legacyLayers.Contains(layerID) { // Create a symlink for the legacy format, where there is one subdirectory per layer ("image"). // See also the comment in physicalLayerPath. physicalLayerPath := w.physicalLayerPath(layerDigest) @@ -106,7 +109,7 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges return fmt.Errorf("writing config json file: %w", err) } - w.legacyLayers[layerID] = struct{}{} + w.legacyLayers.Add(layerID) } return nil } @@ -117,7 +120,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De lastLayerID := "" for i, l := range layerDescriptors { // The legacy format requires a config file per layer - layerConfig := make(map[string]interface{}) + layerConfig := make(map[string]any) // The root layer doesn't have any parent if lastLayerID != "" { @@ -188,14 +191,9 @@ func checkManifestItemsMatch(a, b *ManifestItem) error { if a.Config != b.Config { return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config) } - if len(a.Layers) != len(b.Layers) { + if !slices.Equal(a.Layers, b.Layers) { return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers) } - for i := range a.Layers { - if a.Layers[i] != b.Layers[i] { - return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers[i] %#v vs. %#v", a.Layers[i], b.Layers[i]) - } - } // Ignore RepoTags, that will be built later. // Ignore Parent and LayerSources, which we don’t set to anything meaningful. return nil @@ -229,9 +227,9 @@ func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des item = &w.manifest[i] } - knownRepoTags := map[string]struct{}{} + knownRepoTags := set.New[string]() for _, repoTag := range item.RepoTags { - knownRepoTags[repoTag] = struct{}{} + knownRepoTags.Add(repoTag) } for _, tag := range repoTags { // For github.com/docker/docker consumers, this works just as well as @@ -252,9 +250,9 @@ func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des // analysis and explanation. refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag()) - if _, ok := knownRepoTags[refString]; !ok { + if !knownRepoTags.Contains(refString) { item.RepoTags = append(item.RepoTags, refString) - knownRepoTags[refString] = struct{}{} + knownRepoTags.Add(refString) } } @@ -337,7 +335,7 @@ func (t *tarFI) ModTime() time.Time { func (t *tarFI) IsDir() bool { return false } -func (t *tarFI) Sys() interface{} { +func (t *tarFI) Sys() any { return nil } diff --git a/vendor/github.com/containers/image/v5/docker/reference/normalize.go b/vendor/github.com/containers/image/v5/docker/reference/normalize.go index 6a86ec64f..d3f47d210 100644 --- a/vendor/github.com/containers/image/v5/docker/reference/normalize.go +++ b/vendor/github.com/containers/image/v5/docker/reference/normalize.go @@ -104,7 +104,7 @@ func splitDockerDomain(name string) (domain, remainder string) { } // familiarizeName returns a shortened version of the name familiar -// to to the Docker UI. Familiar names have the default domain +// to the Docker UI. 
Familiar names have the default domain +// to the Docker UI. Familiar names have the default domain // "docker.io" and "library/" repository prefix removed. // For example, "docker.io/library/redis" will have the familiar // name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". diff --git a/vendor/github.com/containers/image/v5/docker/reference/reference.go b/vendor/github.com/containers/image/v5/docker/reference/reference.go index b7cd00b0d..6c5484c06 100644 --- a/vendor/github.com/containers/image/v5/docker/reference/reference.go +++ b/vendor/github.com/containers/image/v5/docker/reference/reference.go @@ -8,8 +8,8 @@ // domain := domain-component ['.' domain-component]* [':' port-number] // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ -// path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ +// path-component := alphanumeric [separator alphanumeric]* +// alphanumeric := /[a-z0-9]+/ // separator := /[_.]|__|[-]*/ // // tag := /[\w][\w.-]{0,127}/ @@ -175,7 +175,7 @@ func splitDomain(name string) (string, string) { // hostname and name string. If no valid hostname is // found, the hostname is empty and the full value // is returned as name -// DEPRECATED: Use Domain or Path +// Deprecated: Use Domain or Path func SplitHostname(named Named) (string, string) { if r, ok := named.(namedRepository); ok { return r.Domain(), r.Path() diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp.go b/vendor/github.com/containers/image/v5/docker/reference/regexp.go index bb8c55474..76ba5c2d5 100644 --- a/vendor/github.com/containers/image/v5/docker/reference/regexp.go +++ b/vendor/github.com/containers/image/v5/docker/reference/regexp.go @@ -1,9 +1,10 @@ package reference import ( - storageRegexp "github.com/containers/storage/pkg/regexp" "regexp" "strings" + + storageRegexp "github.com/containers/storage/pkg/regexp" ) const ( diff --git a/vendor/github.com/containers/image/v5/docker/registries_d.go b/vendor/github.com/containers/image/v5/docker/registries_d.go index 61a4964b6..c7b884ab3 100644 --- a/vendor/github.com/containers/image/v5/docker/registries_d.go +++ b/vendor/github.com/containers/image/v5/docker/registries_d.go @@ -13,9 +13,9 @@ import ( "github.com/containers/image/v5/internal/rootless" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/homedir" - "github.com/ghodss/yaml" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" ) // systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage. @@ -39,18 +39,18 @@ var defaultDockerDir = "/var/lib/containers/sigstore" // registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all. // NOTE: Keep this in sync with docs/registries.d.md! type registryConfiguration struct { - DefaultDocker *registryNamespace `json:"default-docker"` + DefaultDocker *registryNamespace `yaml:"default-docker"` // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*), - Docker map[string]registryNamespace `json:"docker"` + Docker map[string]registryNamespace `yaml:"docker"` } // registryNamespace defines lookaside locations for a single namespace. type registryNamespace struct { - Lookaside string `json:"lookaside"` // For reading, and if LookasideStaging is not present, for writing. - LookasideStaging string `json:"lookaside-staging"` // For writing only.
- SigStore string `json:"sigstore"` // For compatibility, deprecated in favor of Lookaside. - SigStoreStaging string `json:"sigstore-staging"` // For compatibility, deprecated in favor of LookasideStaging. - UseSigstoreAttachments *bool `json:"use-sigstore-attachments,omitempty"` + Lookaside string `yaml:"lookaside"` // For reading, and if LookasideStaging is not present, for writing. + LookasideStaging string `yaml:"lookaside-staging"` // For writing only. + SigStore string `yaml:"sigstore"` // For compatibility, deprecated in favor of Lookaside. + SigStoreStaging string `yaml:"sigstore-staging"` // For compatibility, deprecated in favor of LookasideStaging. + UseSigstoreAttachments *bool `yaml:"use-sigstore-attachments,omitempty"` } // lookasideStorageBase is an "opaque" type representing a lookaside Docker signature storage. diff --git a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go index 37ca098a8..6bcb835b9 100644 --- a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go +++ b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go @@ -149,7 +149,7 @@ func expectTokenOrQuoted(s string) (value string, rest string) { p := make([]byte, len(s)-1) j := copy(p, s[:i]) escape := true - for i = i + 1; i < len(s); i++ { + for i++; i < len(s); i++ { b := s[i] switch { case escape: diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_list.go b/vendor/github.com/containers/image/v5/internal/image/docker_list.go index 8afc40628..617a451aa 100644 --- a/vendor/github.com/containers/image/v5/internal/image/docker_list.go +++ b/vendor/github.com/containers/image/v5/internal/image/docker_list.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/types" ) diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go index 23a21999a..15c9c2279 100644 --- a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go @@ -381,7 +381,7 @@ func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwawa delete(rawContents, "rootfs") delete(rawContents, "history") - updates := map[string]interface{}{"id": v1ID} + updates := map[string]any{"id": v1ID} if parentV1ID != "" { updates["parent"] = parentV1ID } diff --git a/vendor/github.com/containers/image/v5/internal/image/oci_index.go b/vendor/github.com/containers/image/v5/internal/image/oci_index.go index f4f76622c..0e945c851 100644 --- a/vendor/github.com/containers/image/v5/internal/image/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/image/oci_index.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/types" ) diff --git a/vendor/github.com/containers/image/v5/internal/image/sourced.go b/vendor/github.com/containers/image/v5/internal/image/sourced.go index dc09a9e04..661891aa5 100644 --- a/vendor/github.com/containers/image/v5/internal/image/sourced.go +++ b/vendor/github.com/containers/image/v5/internal/image/sourced.go @@ -10,7 +10,7 @@ import ( ) // FromReference returns a types.ImageCloser implementation for the default instance reading from reference. 
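Stepping back to the registries_d.go hunk above: github.com/ghodss/yaml converts YAML to JSON and then honors `json:` struct tags, whereas gopkg.in/yaml.v3 decodes natively and reads only `yaml:` tags, so the import swap forces the wholesale retagging seen in the diff. A cut-down demonstration with two of the fields:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    // registryNamespace reproduces two fields of the vendored struct; with
    // yaml.v3 the `yaml:` tags are what map the hyphenated keys onto the fields.
    type registryNamespace struct {
        Lookaside        string `yaml:"lookaside"`
        LookasideStaging string `yaml:"lookaside-staging"`
    }

    func main() {
        data := []byte("lookaside: https://sig.example.com\nlookaside-staging: file:///var/lib/sigstore\n")
        var ns registryNamespace
        if err := yaml.Unmarshal(data, &ns); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", ns)
    }

With the old `json:` tags left in place, yaml.v3 would silently leave both fields empty, which is why the tags had to change in the same commit as the import.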
-// If reference poitns to a manifest list, .Manifest() still returns the manifest list, +// If reference points to a manifest list, .Manifest() still returns the manifest list, // but other methods transparently return data from an appropriate image instance. // // The caller must call .Close() on the returned ImageCloser. diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go index cff68ac16..47c169a1f 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go @@ -43,10 +43,17 @@ func AddCompat(dest private.ImageDestinationInternalOnly) Compat { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { - return c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{ + res, err := c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{ Cache: blobinfocache.FromBlobInfoCache(cache), IsConfig: isConfig, }) + if err != nil { + return types.BlobInfo{}, err + } + return types.BlobInfo{ + Digest: res.Digest, + Size: res.Size, + }, nil } // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination @@ -59,10 +66,26 @@ func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (c *Compat) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - return c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{ + reused, blob, err := c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{ Cache: blobinfocache.FromBlobInfoCache(cache), CanSubstitute: canSubstitute, }) + if !reused || err != nil { + return reused, types.BlobInfo{}, err + } + res := types.BlobInfo{ + Digest: blob.Digest, + Size: blob.Size, + CompressionOperation: blob.CompressionOperation, + CompressionAlgorithm: blob.CompressionAlgorithm, + } + // This is probably not necessary; we preserve MediaType to decrease risks of breaking for external callers. + // Some transports were not setting the MediaType field anyway, and others were setting the old value on substitution; + // provide the value in cases where it is likely to be correct. + if blob.Digest == info.Digest { + res.MediaType = info.MediaType + } + return true, res, nil } // PutSignatures writes a set of signatures to the destination. 
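The compat shim above is a one-way adapter: internal callers get the slim private.UploadedBlob / private.ReusedBlob results, while external callers still see types.BlobInfo, with MediaType copied only when no digest substitution occurred. A toy version of that TryReusingBlob conversion, using hypothetical stand-in structs rather than the real types:

    package main

    import "fmt"

    // reusedBlob and blobInfo are trimmed stand-ins for private.ReusedBlob and
    // types.BlobInfo, reduced to the fields the conversion touches.
    type reusedBlob struct {
        Digest string
        Size   int64
    }

    type blobInfo struct {
        Digest    string
        Size      int64
        MediaType string
    }

    // widen mirrors Compat.TryReusingBlob: it maps the internal result onto the
    // public type, preserving the requested MediaType only when the digest was
    // not substituted for a different one.
    func widen(res reusedBlob, requested blobInfo) blobInfo {
        out := blobInfo{Digest: res.Digest, Size: res.Size}
        if res.Digest == requested.Digest {
            out.MediaType = requested.MediaType
        }
        return out
    }

    func main() {
        req := blobInfo{Digest: "sha256:aaa", MediaType: "application/vnd.oci.image.layer.v1.tar+gzip"}
        fmt.Printf("%+v\n", widen(reusedBlob{Digest: "sha256:aaa", Size: 123}, req))
    }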
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go index 225ea4491..0dc6bd5af 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go @@ -39,8 +39,8 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool { // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. -func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) { - return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName) +func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { + return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName) } // ImplementsPutBlobPartial implements SupportsPutBlobPartial() that returns true. diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go index 43575ede3..41a81628b 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -46,20 +46,34 @@ func FromPublic(dest types.ImageDestination) private.ImageDestination { // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { - return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig) +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { + res, err := w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig) + if err != nil { + return private.UploadedBlob{}, err + } + return private.UploadedBlob{ + Digest: res.Digest, + Size: res.Size, + }, nil } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. 
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may -// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be -// reflected in the manifest that will be written. +// If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { - return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute) +func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute) + if !reused || err != nil { + return reused, private.ReusedBlob{}, err + } + return true, private.ReusedBlob{ + Digest: blob.Digest, + Size: blob.Size, + CompressionOperation: blob.CompressionOperation, + CompressionAlgorithm: blob.CompressionAlgorithm, + }, nil } // PutSignaturesWithFormat writes a set of signatures to the destination. diff --git a/vendor/github.com/containers/image/v5/internal/manifest/common.go b/vendor/github.com/containers/image/v5/internal/manifest/common.go new file mode 100644 index 000000000..1f2ccb528 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/common.go @@ -0,0 +1,72 @@ +package manifest + +import ( + "encoding/json" + "fmt" +) + +// AllowedManifestFields is a bit mask of “essential” manifest fields that ValidateUnambiguousManifestFormat +// can expect to be present. +type AllowedManifestFields int + +const ( + AllowedFieldConfig AllowedManifestFields = 1 << iota + AllowedFieldFSLayers + AllowedFieldHistory + AllowedFieldLayers + AllowedFieldManifests + AllowedFieldFirstUnusedBit // Keep this at the end! +) + +// ValidateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than +// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields +// other than the ones the caller specifically allows. +// expectedMIMEType is used only for diagnostics. +// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format +// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous +// data that just isn’t what was expected, as opposed to actually ambiguous data. +func ValidateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string, + allowed AllowedManifestFields) error { + if allowed >= AllowedFieldFirstUnusedBit { + return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed) + } + // Use a private type to decode, not just a map[string]any, because we want + // to also reject case-insensitive matches (which would be used by Go when really decoding + // the manifest). + // (It is expected that as manifest formats are added or extended over time, more fields will be added + // here.) 
+ detectedFields := struct { + Config any `json:"config"` + FSLayers any `json:"fsLayers"` + History any `json:"history"` + Layers any `json:"layers"` + Manifests any `json:"manifests"` + }{} + if err := json.Unmarshal(manifest, &detectedFields); err != nil { + // The caller was supposed to already validate version numbers, so this should not happen; + // let’s not bother with making this error “nice”. + return err + } + unexpected := []string{} + // Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste. + if detectedFields.Config != nil && (allowed&AllowedFieldConfig) == 0 { + unexpected = append(unexpected, "config") + } + if detectedFields.FSLayers != nil && (allowed&AllowedFieldFSLayers) == 0 { + unexpected = append(unexpected, "fsLayers") + } + if detectedFields.History != nil && (allowed&AllowedFieldHistory) == 0 { + unexpected = append(unexpected, "history") + } + if detectedFields.Layers != nil && (allowed&AllowedFieldLayers) == 0 { + unexpected = append(unexpected, "layers") + } + if detectedFields.Manifests != nil && (allowed&AllowedFieldManifests) == 0 { + unexpected = append(unexpected, "manifests") + } + if len(unexpected) != 0 { + return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`, + unexpected, expectedMIMEType) + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go new file mode 100644 index 000000000..68d079697 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go @@ -0,0 +1,15 @@ +package manifest + +import ( + "github.com/opencontainers/go-digest" +) + +// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. +// +// This is publicly visible as c/image/manifest.Schema2Descriptor. +type Schema2Descriptor struct { + MediaType string `json:"mediaType"` + Size int64 `json:"size"` + Digest digest.Digest `json:"digest"` + URLs []string `json:"urls,omitempty"` +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go new file mode 100644 index 000000000..e98c5c99e --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -0,0 +1,260 @@ +package manifest + +import ( + "encoding/json" + "fmt" + + platform "github.com/containers/image/v5/internal/pkg/platform" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/slices" +) + +// Schema2PlatformSpec describes the platform which a particular manifest is +// specialized for. +// This is publicly visible as c/image/manifest.Schema2PlatformSpec. +type Schema2PlatformSpec struct { + Architecture string `json:"architecture"` + OS string `json:"os"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` + Features []string `json:"features,omitempty"` // removed in OCI +} + +// Schema2ManifestDescriptor references a platform-specific manifest. +// This is publicly visible as c/image/manifest.Schema2ManifestDescriptor. +type Schema2ManifestDescriptor struct { + Schema2Descriptor + Platform Schema2PlatformSpec `json:"platform"` +} + +// Schema2ListPublic is a list of platform-specific manifests. 
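ValidateUnambiguousManifestFormat above is driven by a 1 << iota bit mask, with a deliberately unused final constant acting as an upper bound so out-of-range masks are caught early. The guard and membership tests in isolation:

    package main

    import "fmt"

    type allowedFields int

    const (
        fieldConfig allowedFields = 1 << iota
        fieldFSLayers
        fieldHistory
        fieldLayers
        fieldManifests
        fieldFirstUnusedBit // keep last: any mask >= this is invalid
    )

    func main() {
        allowed := fieldManifests // the mask a manifest-list parser would pass
        fmt.Println("mask in range:", allowed < fieldFirstUnusedBit)
        fmt.Println("config allowed:", allowed&fieldConfig != 0)
        fmt.Println("manifests allowed:", allowed&fieldManifests != 0)
    }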
+// This is publicly visible as c/image/manifest.Schema2List. +// Internal users should usually use Schema2List instead. +type Schema2ListPublic struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Manifests []Schema2ManifestDescriptor `json:"manifests"` +} + +// MIMEType returns the MIME type of this particular manifest list. +func (list *Schema2ListPublic) MIMEType() string { + return list.MediaType +} + +// Instances returns a slice of digests of the manifests that this list knows of. +func (list *Schema2ListPublic) Instances() []digest.Digest { + results := make([]digest.Digest, len(list.Manifests)) + for i, m := range list.Manifests { + results[i] = m.Digest + } + return results +} + +// Instance returns the ListUpdate of a particular instance in the list. +func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) { + for _, manifest := range list.Manifests { + if manifest.Digest == instanceDigest { + return ListUpdate{ + Digest: manifest.Digest, + Size: manifest.Size, + MediaType: manifest.MediaType, + }, nil + } + } + return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest) +} + +// UpdateInstances updates the sizes, digests, and media types of the manifests +// which the list catalogs. +func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { + if len(updates) != len(list.Manifests) { + return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates)) + } + for i := range updates { + if err := updates[i].Digest.Validate(); err != nil { + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err) + } + list.Manifests[i].Digest = updates[i].Digest + if updates[i].Size < 0 { + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) + } + list.Manifests[i].Size = updates[i].Size + if updates[i].MediaType == "" { + return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType) + } + list.Manifests[i].MediaType = updates[i].MediaType + } + return nil +} + +func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { + // ChooseInstanceByCompression is same as ChooseInstance for schema2 manifest list. + return list.ChooseInstance(ctx) +} + +// ChooseInstance parses blob as a schema2 manifest list, and returns the digest +// of the image which is appropriate for the current environment. 
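UpdateInstances above treats the incoming slice as strictly parallel to Manifests, validating each entry's digest, size, and media type before mutating anything. The skeleton of that parallel-slice update, minus the digest and media-type specifics:

    package main

    import "fmt"

    type listUpdate struct {
        Digest string
        Size   int64
    }

    // updateInstances applies updates positionally, refusing length mismatches
    // and invalid entries up front, as the vendored UpdateInstances does.
    func updateInstances(digests []string, updates []listUpdate) error {
        if len(updates) != len(digests) {
            return fmt.Errorf("incorrect number of update entries: expected %d, got %d", len(digests), len(updates))
        }
        for i, u := range updates {
            if u.Size < 0 {
                return fmt.Errorf("update %d of %d had an invalid size (%d)", i+1, len(updates), u.Size)
            }
            digests[i] = u.Digest
        }
        return nil
    }

    func main() {
        digests := []string{"sha256:aaa"}
        err := updateInstances(digests, []listUpdate{{Digest: "sha256:bbb", Size: 100}})
        fmt.Println(digests, err)
    }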
+func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { + wantedPlatforms, err := platform.WantedPlatforms(ctx) + if err != nil { + return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) + } + for _, wantedPlatform := range wantedPlatforms { + for _, d := range list.Manifests { + imagePlatform := imgspecv1.Platform{ + Architecture: d.Platform.Architecture, + OS: d.Platform.OS, + OSVersion: d.Platform.OSVersion, + OSFeatures: slices.Clone(d.Platform.OSFeatures), + Variant: d.Platform.Variant, + } + if platform.MatchesPlatform(imagePlatform, wantedPlatform) { + return d.Digest, nil + } + } + } + return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) +} + +// Serialize returns the list in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (list *Schema2ListPublic) Serialize() ([]byte, error) { + buf, err := json.Marshal(list) + if err != nil { + return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err) + } + return buf, nil +} + +// Schema2ListPublicFromComponents creates a Schema2 manifest list instance from the +// supplied data. +// This is publicly visible as c/image/manifest.Schema2ListFromComponents. +func Schema2ListPublicFromComponents(components []Schema2ManifestDescriptor) *Schema2ListPublic { + list := Schema2ListPublic{ + SchemaVersion: 2, + MediaType: DockerV2ListMediaType, + Manifests: make([]Schema2ManifestDescriptor, len(components)), + } + for i, component := range components { + m := Schema2ManifestDescriptor{ + Schema2Descriptor{ + MediaType: component.MediaType, + Size: component.Size, + Digest: component.Digest, + URLs: slices.Clone(component.URLs), + }, + Schema2PlatformSpec{ + Architecture: component.Platform.Architecture, + OS: component.Platform.OS, + OSVersion: component.Platform.OSVersion, + OSFeatures: slices.Clone(component.Platform.OSFeatures), + Variant: component.Platform.Variant, + Features: slices.Clone(component.Platform.Features), + }, + } + list.Manifests[i] = m + } + return &list +} + +// Schema2ListPublicClone creates a deep copy of the passed-in list. +// This is publicly visible as c/image/manifest.Schema2ListClone. +func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic { + return Schema2ListPublicFromComponents(list.Manifests) +} + +// ToOCI1Index returns the list encoded as an OCI1 index. +func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) { + components := make([]imgspecv1.Descriptor, 0, len(list.Manifests)) + for _, manifest := range list.Manifests { + converted := imgspecv1.Descriptor{ + MediaType: manifest.MediaType, + Size: manifest.Size, + Digest: manifest.Digest, + URLs: slices.Clone(manifest.URLs), + Platform: &imgspecv1.Platform{ + OS: manifest.Platform.OS, + Architecture: manifest.Platform.Architecture, + OSFeatures: slices.Clone(manifest.Platform.OSFeatures), + OSVersion: manifest.Platform.OSVersion, + Variant: manifest.Platform.Variant, + }, + } + components = append(components, converted) + } + oci := OCI1IndexPublicFromComponents(components, nil) + return oci, nil +} + +// ToSchema2List returns the list encoded as a Schema2 list. 
+func (list *Schema2ListPublic) ToSchema2List() (*Schema2ListPublic, error) { + return Schema2ListPublicClone(list), nil +} + +// Schema2ListPublicFromManifest creates a Schema2 manifest list instance from marshalled +// JSON, presumably generated by encoding a Schema2 manifest list. +// This is publicly visible as c/image/manifest.Schema2ListFromManifest. +func Schema2ListPublicFromManifest(manifest []byte) (*Schema2ListPublic, error) { + list := Schema2ListPublic{ + Manifests: []Schema2ManifestDescriptor{}, + } + if err := json.Unmarshal(manifest, &list); err != nil { + return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err) + } + if err := ValidateUnambiguousManifestFormat(manifest, DockerV2ListMediaType, + AllowedFieldManifests); err != nil { + return nil, err + } + return &list, nil +} + +// Clone returns a deep copy of this list and its contents. +func (list *Schema2ListPublic) Clone() ListPublic { + return Schema2ListPublicClone(list) +} + +// ConvertToMIMEType converts the passed-in manifest list to a manifest +// list of the specified type. +func (list *Schema2ListPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) { + switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { + case DockerV2ListMediaType: + return list.Clone(), nil + case imgspecv1.MediaTypeImageIndex: + return list.ToOCI1Index() + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType) + default: + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType) + } +} + +// Schema2List is a list of platform-specific manifests. +type Schema2List struct { + Schema2ListPublic +} + +func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List { + return &Schema2List{*public} +} + +func (index *Schema2List) CloneInternal() List { + return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic)) +} + +func (index *Schema2List) Clone() ListPublic { + return index.CloneInternal() +} + +// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled +// JSON, presumably generated by encoding a Schema2 manifest list. +func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) { + public, err := Schema2ListPublicFromManifest(manifest) + if err != nil { + return nil, err + } + return schema2ListFromPublic(public), nil +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/errors.go b/vendor/github.com/containers/image/v5/internal/manifest/errors.go index e5732a8c4..6ebe4b24c 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/errors.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/errors.go @@ -2,6 +2,10 @@ package manifest import "fmt" +// FIXME: This is a duplicate of c/image/manifest.DockerV2Schema2ConfigMediaType. +// Deduplicate that, depending on outcome of https://github.com/containers/image/pull/1791 . +const dockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json" + // NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation // on an object which is not a “container image” in the standard sense (e.g.
an OCI artifact) // @@ -28,5 +32,9 @@ func NewNonImageArtifactError(mimeType string) error { } func (e NonImageArtifactError) Error() string { + // Special-case these invalid mixed images, which show up from time to time: + if e.mimeType == dockerV2Schema2ConfigMediaType { + return fmt.Sprintf("invalid mixed OCI image with Docker v2s2 config (%q)", e.mimeType) + } return fmt.Sprintf("unsupported image-specific operation on artifact with type %q", e.mimeType) } diff --git a/vendor/github.com/containers/image/v5/internal/manifest/list.go b/vendor/github.com/containers/image/v5/internal/manifest/list.go new file mode 100644 index 000000000..07c7d85f4 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/list.go @@ -0,0 +1,90 @@ +package manifest + +import ( + "fmt" + + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ListPublic is a subset of List which is a part of the public API; +// so no methods can be added, removed or changed. +// +// Internal users should usually use List instead. +type ListPublic interface { + // MIMEType returns the MIME type of this particular manifest list. + MIMEType() string + + // Instances returns a list of the manifests that this list knows of, other than its own. + Instances() []digest.Digest + + // Update information about the list's instances. The length of the passed-in slice must + // match the length of the list of instances which the list already contains, and every field + // must be specified. + UpdateInstances([]ListUpdate) error + + // Instance returns the size and MIME type of a particular instance in the list. + Instance(digest.Digest) (ListUpdate, error) + + // ChooseInstance selects which manifest is most appropriate for the platform described by the + // SystemContext, or for the current platform if the SystemContext doesn't specify any details. + ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) + + // Serialize returns the list in a blob format. + // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded + // from one, even if no modifications were made! + Serialize() ([]byte, error) + + // ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error. + ConvertToMIMEType(mimeType string) (ListPublic, error) + + // Clone returns a deep copy of this list and its contents. + Clone() ListPublic +} + +// List is an interface for parsing, modifying lists of image manifests. +// Callers can either use this abstract interface without understanding the details of the formats, +// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members +// directly. +type List interface { + ListPublic + // CloneInternal returns a deep copy of this list and its contents. + CloneInternal() List + // ChooseInstanceByCompression selects which manifest is most appropriate for the platform and compression described by the + // SystemContext (or for the current platform if the SystemContext doesn't specify any details). It prefers gzip when preferGzip + // is OptionalBoolTrue, and chooses the best available compression when it is OptionalBoolFalse or left OptionalBoolUndefined. + ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) +} +
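As a rough usage sketch of the interface above together with ListFromBlob (defined just below), illustrative only and not part of the patch; the helper name is hypothetical and the caller is assumed to have already fetched blob and mimeType, e.g. from a registry:

func resolveInstanceDigest(ctx *types.SystemContext, blob []byte, mimeType string) (digest.Digest, error) {
	// Parse the raw blob into a List; callers outside this package would use ListPublicFromBlob.
	list, err := ListFromBlob(blob, mimeType)
	if err != nil {
		return "", err
	}
	// Pick the instance matching the platform described by ctx (or the current platform when unset).
	return list.ChooseInstance(ctx)
}

+// ListUpdate includes the fields which a List's UpdateInstances() method will modify.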
+// This is publicly visible as c/image/manifest.ListUpdate. +type ListUpdate struct { + Digest digest.Digest + Size int64 + MediaType string +} + +// ListPublicFromBlob parses a list of manifests. +// This is publicly visible as c/image/manifest.ListFromBlob. +func ListPublicFromBlob(manifest []byte, manifestMIMEType string) (ListPublic, error) { + list, err := ListFromBlob(manifest, manifestMIMEType) + if err != nil { + return nil, err + } + return list, nil +} + +// ListFromBlob parses a list of manifests. +func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) { + normalized := NormalizedMIMEType(manifestMIMEType) + switch normalized { + case DockerV2ListMediaType: + return Schema2ListFromManifest(manifest) + case imgspecv1.MediaTypeImageIndex: + return OCI1IndexFromManifest(manifest) + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Treating single images as manifest lists is not implemented") + } + return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized) +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go new file mode 100644 index 000000000..1dbcc1418 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go @@ -0,0 +1,167 @@ +package manifest + +import ( + "encoding/json" + + "github.com/containers/libtrust" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// FIXME: Should we just use docker/distribution and docker/docker implementations directly? + +// FIXME(runcom, mitr): should we have a mediatype pkg?? +const ( + // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 + DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json" + // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature + DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" + // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 + DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json" + // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs. + DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json" + // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers. + DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" + // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers. + DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar" + // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list + DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json" + // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers. + DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar" + // DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers. + DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" +) + +// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. +// FIXME?
We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest, +// but we may not have such metadata available (e.g. when the manifest is a local file). +// This is publicly visible as c/image/manifest.GuessMIMEType. +func GuessMIMEType(manifest []byte) string { + // A subset of manifest fields; the rest is silently ignored by json.Unmarshal. + // Also docker/distribution/manifest.Versioned. + meta := struct { + MediaType string `json:"mediaType"` + SchemaVersion int `json:"schemaVersion"` + Signatures any `json:"signatures"` + }{} + if err := json.Unmarshal(manifest, &meta); err != nil { + return "" + } + + switch meta.MediaType { + case DockerV2Schema2MediaType, DockerV2ListMediaType, + imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type. + return meta.MediaType + } + // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest. + switch meta.SchemaVersion { + case 1: + if meta.Signatures != nil { + return DockerV2Schema1SignedMediaType + } + return DockerV2Schema1MediaType + case 2: + // Best effort to understand if this is an OCI image since mediaType + // wasn't in the manifest for OCI image-spec < 1.0.2. + // For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess. + ociMan := struct { + Config struct { + MediaType string `json:"mediaType"` + } `json:"config"` + }{} + if err := json.Unmarshal(manifest, &ociMan); err != nil { + return "" + } + switch ociMan.Config.MediaType { + case imgspecv1.MediaTypeImageConfig: + return imgspecv1.MediaTypeImageManifest + case DockerV2Schema2ConfigMediaType: + // This case should not happen since a Docker image + // must declare a top-level media type and + // `meta.MediaType` has already been checked. + return DockerV2Schema2MediaType + } + // Maybe an image index or an OCI artifact. + ociIndex := struct { + Manifests []imgspecv1.Descriptor `json:"manifests"` + }{} + if err := json.Unmarshal(manifest, &ociIndex); err != nil { + return "" + } + if len(ociIndex.Manifests) != 0 { + if ociMan.Config.MediaType == "" { + return imgspecv1.MediaTypeImageIndex + } + // FIXME: this is mixing media types of manifests and configs. + return ociMan.Config.MediaType + } + // It's most likely an OCI artifact with a custom config media + // type which is not (and cannot be) covered by the media-type + // checks above. + return imgspecv1.MediaTypeImageManifest + } + return "" +} + +// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures. +// This is publicly visible as c/image/manifest.Digest. +func Digest(manifest []byte) (digest.Digest, error) { + if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType { + sig, err := libtrust.ParsePrettySignature(manifest, "signatures") + if err != nil { + return "", err + } + manifest, err = sig.Payload() + if err != nil { + // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string + // that libtrust itself has josebase64UrlEncode()d + return "", err + } + } + + return digest.FromBytes(manifest), nil +} +
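A short sketch of how the two helpers above are typically combined, illustrative only and not part of the patch; the function name is hypothetical and rawManifest is assumed to be a caller-supplied manifest blob:

func describeManifest(rawManifest []byte) (string, digest.Digest, error) {
	mimeType := GuessMIMEType(rawManifest) // "" when the format is unrecognized
	d, err := Digest(rawManifest)          // strips v1s1 JWS signatures before hashing, if present
	if err != nil {
		return "", "", err
	}
	return mimeType, d, nil
}

+// MatchesDigest returns true iff the manifest matches expectedDigest. +// Error may be set if this returns false.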
+// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified, +// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob. +// This is publicly visible as c/image/manifest.MatchesDigest. +func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) { + // This should eventually support various digest types. + actualDigest, err := Digest(manifest) + if err != nil { + return false, err + } + return expectedDigest == actualDigest, nil +} + +// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, +// centralizing various workarounds. +// This is publicly visible as c/image/manifest.NormalizedMIMEType. +func NormalizedMIMEType(input string) string { + switch input { + // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . + // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might + // need to happen within the ImageSource. + case "application/json": + return DockerV2Schema1SignedMediaType + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeImageIndex, + DockerV2Schema2MediaType, + DockerV2ListMediaType: + return input + default: + // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time + // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 + // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 + // + // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. + // This makes no real sense, but it happens + // because requests for manifests are + // redirected to a content distribution + // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 + return DockerV2Schema1SignedMediaType + } +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go new file mode 100644 index 000000000..8e911678e --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -0,0 +1,329 @@ +package manifest + +import ( + "encoding/json" + "fmt" + "math" + "runtime" + + platform "github.com/containers/image/v5/internal/pkg/platform" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" +) + +const ( + // OCI1InstanceAnnotationCompressionZSTD is an annotation name that can be placed on a manifest descriptor in an OCI index. + // The value of the annotation must be the string "true". + // If this annotation is present on a manifest, consuming that image instance requires support for Zstd compression. + // That also suggests that this instance benefits from + // Zstd compression, so it can be preferred by compatible consumers over instances that + // use gzip, depending on their local policy. 
+ OCI1InstanceAnnotationCompressionZSTD = "io.github.containers.compression.zstd" +) + +// OCI1IndexPublic is just an alias for the OCI index type, but one which we can +// provide methods for. +// This is publicly visible as c/image/manifest.OCI1Index +// Internal users should usually use OCI1Index instead. +type OCI1IndexPublic struct { + imgspecv1.Index +} + +// MIMEType returns the MIME type of this particular manifest index. +func (index *OCI1IndexPublic) MIMEType() string { + return imgspecv1.MediaTypeImageIndex +} + +// Instances returns a slice of digests of the manifests that this index knows of. +func (index *OCI1IndexPublic) Instances() []digest.Digest { + results := make([]digest.Digest, len(index.Manifests)) + for i, m := range index.Manifests { + results[i] = m.Digest + } + return results +} + +// Instance returns the ListUpdate of a particular instance in the index. +func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) { + for _, manifest := range index.Manifests { + if manifest.Digest == instanceDigest { + return ListUpdate{ + Digest: manifest.Digest, + Size: manifest.Size, + MediaType: manifest.MediaType, + }, nil + } + } + return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest) +} + +// UpdateInstances updates the sizes, digests, and media types of the manifests +// which the list catalogs. +func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error { + if len(updates) != len(index.Manifests) { + return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates)) + } + for i := range updates { + if err := updates[i].Digest.Validate(); err != nil { + return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err) + } + index.Manifests[i].Digest = updates[i].Digest + if updates[i].Size < 0 { + return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) + } + index.Manifests[i].Size = updates[i].Size + if updates[i].MediaType == "" { + return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType) + } + index.Manifests[i].MediaType = updates[i].MediaType + } + return nil +} + +// instanceIsZstd returns true if instance is a zstd instance otherwise false. 
+func instanceIsZstd(manifest imgspecv1.Descriptor) bool { + if value, ok := manifest.Annotations[OCI1InstanceAnnotationCompressionZSTD]; ok && value == "true" { + return true + } + return false +} + +type instanceCandidate struct { + platformIndex int // Index of the candidate in platform.WantedPlatforms: lower numbers are preferred; or math.MaxInt if the candidate doesn’t have a platform + isZstd bool // true if the instance is a zstd instance + manifestPosition int // A zero-based index of the instance in the manifest list + digest digest.Digest // Instance digest +} + +func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip bool) bool { + switch { + case ic.platformIndex != other.platformIndex: + return ic.platformIndex < other.platformIndex + case ic.isZstd != other.isZstd: + if !preferGzip { + return ic.isZstd + } else { + return !ic.isZstd + } + case ic.manifestPosition != other.manifestPosition: + return ic.manifestPosition < other.manifestPosition + } + panic("internal error: invalid comparison between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition. +} + +// chooseInstance is a private equivalent to ChooseInstanceByCompression, +// shared by ChooseInstance and ChooseInstanceByCompression. +func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { + didPreferGzip := false + if preferGzip == types.OptionalBoolTrue { + didPreferGzip = true + } + wantedPlatforms, err := platform.WantedPlatforms(ctx) + if err != nil { + return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) + } + var bestMatch *instanceCandidate + for manifestIndex, d := range index.Manifests { + candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest} + if d.Platform != nil { + foundPlatform := false + for platformIndex, wantedPlatform := range wantedPlatforms { + imagePlatform := imgspecv1.Platform{ + Architecture: d.Platform.Architecture, + OS: d.Platform.OS, + OSVersion: d.Platform.OSVersion, + OSFeatures: slices.Clone(d.Platform.OSFeatures), + Variant: d.Platform.Variant, + } + if platform.MatchesPlatform(imagePlatform, wantedPlatform) { + foundPlatform = true + candidate.platformIndex = platformIndex + break + } + } + if !foundPlatform { + continue + } + } + if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) { + bestMatch = &candidate + } + } + if bestMatch != nil { + return bestMatch.digest, nil + } + return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) +} + +func (index *OCI1Index) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { + return index.chooseInstance(ctx, preferGzip) +} + +// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest +// of the image which is appropriate for the current environment. +func (index *OCI1IndexPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { + return index.chooseInstance(ctx, types.OptionalBoolFalse) +} +
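To make the candidate ordering concrete, a small illustrative sketch (not part of the patch, with hypothetical values) of two candidates that match the same platform:

func exampleZstdPreference() {
	gzipCandidate := instanceCandidate{platformIndex: 0, isZstd: false, manifestPosition: 0}
	zstdCandidate := instanceCandidate{platformIndex: 0, isZstd: true, manifestPosition: 1}
	_ = zstdCandidate.isPreferredOver(&gzipCandidate, false) // true: zstd wins when gzip is not explicitly preferred
	_ = zstdCandidate.isPreferredOver(&gzipCandidate, true)  // false: the gzip instance wins when preferGzip is set
}

+// Serialize returns the index in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!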
+func (index *OCI1IndexPublic) Serialize() ([]byte, error) { + buf, err := json.Marshal(index) + if err != nil { + return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err) + } + return buf, nil +} + +// OCI1IndexPublicFromComponents creates an OCI1 image index instance from the +// supplied data. +// This is publicly visible as c/image/manifest.OCI1IndexFromComponents. +func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1IndexPublic { + index := OCI1IndexPublic{ + imgspecv1.Index{ + Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageIndex, + Manifests: make([]imgspecv1.Descriptor, len(components)), + Annotations: maps.Clone(annotations), + }, + } + for i, component := range components { + var platform *imgspecv1.Platform + if component.Platform != nil { + platform = &imgspecv1.Platform{ + Architecture: component.Platform.Architecture, + OS: component.Platform.OS, + OSVersion: component.Platform.OSVersion, + OSFeatures: slices.Clone(component.Platform.OSFeatures), + Variant: component.Platform.Variant, + } + } + m := imgspecv1.Descriptor{ + MediaType: component.MediaType, + Size: component.Size, + Digest: component.Digest, + URLs: slices.Clone(component.URLs), + Annotations: maps.Clone(component.Annotations), + Platform: platform, + } + index.Manifests[i] = m + } + return &index +} + +// OCI1IndexPublicClone creates a deep copy of the passed-in index. +// This is publicly visible as c/image/manifest.OCI1IndexClone. +func OCI1IndexPublicClone(index *OCI1IndexPublic) *OCI1IndexPublic { + return OCI1IndexPublicFromComponents(index.Manifests, index.Annotations) +} + +// ToOCI1Index returns the index encoded as an OCI1 index. +func (index *OCI1IndexPublic) ToOCI1Index() (*OCI1IndexPublic, error) { + return OCI1IndexPublicClone(index), nil +} + +// ToSchema2List returns the index encoded as a Schema2 list. +func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) { + components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests)) + for _, manifest := range index.Manifests { + platform := manifest.Platform + if platform == nil { + platform = &imgspecv1.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + } + } + converted := Schema2ManifestDescriptor{ + Schema2Descriptor{ + MediaType: manifest.MediaType, + Size: manifest.Size, + Digest: manifest.Digest, + URLs: slices.Clone(manifest.URLs), + }, + Schema2PlatformSpec{ + OS: platform.OS, + Architecture: platform.Architecture, + OSFeatures: slices.Clone(platform.OSFeatures), + OSVersion: platform.OSVersion, + Variant: platform.Variant, + }, + } + components = append(components, converted) + } + s2 := Schema2ListPublicFromComponents(components) + return s2, nil +} + +// OCI1IndexPublicFromManifest creates an OCI1 manifest index instance from marshalled +// JSON, presumably generated by encoding a OCI1 manifest index. +// This is publicly visible as c/image/manifest.OCI1IndexFromManifest. 
+func OCI1IndexPublicFromManifest(manifest []byte) (*OCI1IndexPublic, error) { + index := OCI1IndexPublic{ + Index: imgspecv1.Index{ + Versioned: imgspec.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageIndex, + Manifests: []imgspecv1.Descriptor{}, + Annotations: make(map[string]string), + }, + } + if err := json.Unmarshal(manifest, &index); err != nil { + return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err) + } + if err := ValidateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex, + AllowedFieldManifests); err != nil { + return nil, err + } + return &index, nil +} + +// Clone returns a deep copy of this list and its contents. +func (index *OCI1IndexPublic) Clone() ListPublic { + return OCI1IndexPublicClone(index) +} + +// ConvertToMIMEType converts the passed-in image index to a manifest list of +// the specified type. +func (index *OCI1IndexPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) { + switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { + case DockerV2ListMediaType: + return index.ToSchema2List() + case imgspecv1.MediaTypeImageIndex: + return index.Clone(), nil + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType) + default: + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType) + } +} + +type OCI1Index struct { + OCI1IndexPublic +} + +func oci1IndexFromPublic(public *OCI1IndexPublic) *OCI1Index { + return &OCI1Index{*public} +} + +func (index *OCI1Index) CloneInternal() List { + return oci1IndexFromPublic(OCI1IndexPublicClone(&index.OCI1IndexPublic)) +} + +func (index *OCI1Index) Clone() ListPublic { + return index.CloneInternal() +} + +// OCI1IndexFromManifest creates a OCI1 manifest list instance from marshalled +// JSON, presumably generated by encoding a OCI1 manifest list. +func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) { + public, err := OCI1IndexPublicFromManifest(manifest) + if err != nil { + return nil, err + } + return oci1IndexFromPublic(public), nil +} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go index 3bda6af29..59b1d4b9e 100644 --- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go @@ -25,6 +25,7 @@ import ( "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/slices" ) // For Linux, the kernel has already detected the ABI, ISA and Features. @@ -152,13 +153,9 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) { if wantedVariant != "" { // If the user requested a specific variant, we'll walk down // the list from most to least compatible. 
- if compatibility[wantedArch] != nil { - variantOrder := compatibility[wantedArch] - for i, v := range variantOrder { - if wantedVariant == v { - variants = variantOrder[i:] - break - } + if variantOrder := compatibility[wantedArch]; variantOrder != nil { + if i := slices.Index(variantOrder, wantedVariant); i != -1 { + variants = variantOrder[i:] } } if variants == nil { diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go index bfd6148ce..b1dd4ceb0 100644 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -7,6 +7,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/internal/signature" + compression "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" ) @@ -46,24 +47,22 @@ type ImageDestinationInternalOnly interface { // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. - // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. - PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (types.BlobInfo, error) + // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. + PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (UploadedBlob, error) // PutBlobPartial attempts to create a blob using the data that is already present // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. - PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) + PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (UploadedBlob, error) // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. - // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may - // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be - // reflected in the manifest that will be written. + // If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
- TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error) + TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, ReusedBlob, error) // PutSignaturesWithFormat writes a set of signatures to the destination. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for @@ -79,6 +78,13 @@ type ImageDestination interface { ImageDestinationInternalOnly } +// UploadedBlob is information about a blob written to a destination. +// It is the subset of types.BlobInfo fields the transport is responsible for setting; all fields must be provided. +type UploadedBlob struct { + Digest digest.Digest + Size int64 +} + // PutBlobOptions are used in PutBlobWithOptions. type PutBlobOptions struct { Cache blobinfocache.BlobInfoCache2 // Cache to optionally update with the uploaded blob / look up blob infos. @@ -112,6 +118,17 @@ type TryReusingBlobOptions struct { SrcRef reference.Named // A reference to the source image that contains the input blob. } +// ReusedBlob is information about a blob reused in a destination. +// It is the subset of types.BlobInfo fields the transport is responsible for setting. +type ReusedBlob struct { + Digest digest.Digest // Must be provided + Size int64 // Must be provided + // The following compression fields should be set when the reuse substitutes + // a differently-compressed blob. + CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A + CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A +} + // ImageSourceChunk is a portion of a blob. // This API is experimental and can be changed without bumping the major version number. type ImageSourceChunk struct { diff --git a/vendor/github.com/containers/image/v5/internal/set/set.go b/vendor/github.com/containers/image/v5/internal/set/set.go new file mode 100644 index 000000000..5c7bcabef --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/set/set.go @@ -0,0 +1,46 @@ +package set + +import "golang.org/x/exp/maps" + +// FIXME: +// - Docstrings +// - This should be in a public library somewhere + +type Set[E comparable] struct { + m map[E]struct{} +} + +func New[E comparable]() *Set[E] { + return &Set[E]{ + m: map[E]struct{}{}, + } +} + +func NewWithValues[E comparable](values ...E) *Set[E] { + s := New[E]() + for _, v := range values { + s.Add(v) + } + return s +} + +func (s Set[E]) Add(v E) { + s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again. +} + +func (s Set[E]) Delete(v E) { + delete(s.m, v) +} + +func (s *Set[E]) Contains(v E) bool { + _, ok := s.m[v] + return ok +} + +func (s *Set[E]) Empty() bool { + return len(s.m) == 0 +} + +func (s *Set[E]) Values() []E { + return maps.Keys(s.m) +}
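A quick illustrative sketch (not part of the patch; the function name is hypothetical) of the new generic set, which docker_schema1.go below switches to for duplicate-ID detection:

func exampleSetUsage() {
	seen := set.New[string]() // or set.NewWithValues("layer-1", "layer-2")
	seen.Add("layer-1")
	seen.Add("layer-1")          // adding the same value twice is harmless
	_ = seen.Contains("layer-1") // true
	seen.Delete("layer-1")
	_ = seen.Empty()  // true again
	_ = seen.Values() // order is unspecified, since the set is backed by a map
}

diff --git a/vendor/github.com/containers/image/v5/internal/signature/signature.go b/vendor/github.com/containers/image/v5/internal/signature/signature.go index ee90b788b..6f95115a1 100644 --- a/vendor/github.com/containers/image/v5/internal/signature/signature.go +++ b/vendor/github.com/containers/image/v5/internal/signature/signature.go @@ -24,7 +24,7 @@ type Signature interface { blobChunk() ([]byte, error) } -// BlobChunk returns a representation of sig as a []byte, suitable for long-term storage. +// Blob returns a representation of sig as a []byte, suitable for long-term storage.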
func Blob(sig Signature) ([]byte, error) { chunk, err := sig.blobChunk() if err != nil { @@ -66,22 +66,20 @@ func FromBlob(blob []byte) (Signature, error) { // The newer format: binary 0, format name, newline, data case 0x00: blob = blob[1:] - newline := bytes.IndexByte(blob, '\n') - if newline == -1 { + formatBytes, blobChunk, foundNewline := bytes.Cut(blob, []byte{'\n'}) + if !foundNewline { return nil, fmt.Errorf("invalid signature format, missing newline") } - formatBytes := blob[:newline] for _, b := range formatBytes { if b < 32 || b >= 0x7F { return nil, fmt.Errorf("invalid signature format, non-ASCII byte %#x", b) } } - blobChunk := blob[newline+1:] switch { case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)): return SimpleSigningFromBlob(blobChunk), nil case bytes.Equal(formatBytes, []byte(SigstoreFormat)): - return SigstoreFromBlobChunk(blobChunk) + return sigstoreFromBlobChunk(blobChunk) default: return nil, fmt.Errorf("unrecognized signature format %q", string(formatBytes)) } @@ -102,10 +100,3 @@ func UnsupportedFormatError(sig Signature) error { return fmt.Errorf("unsupported, and unrecognized, signature format %q", string(formatID)) } } - -// copyByteSlice returns a guaranteed-fresh copy of a byte slice -// Use this to make sure the underlying data is not shared and can’t be unexpectedly modified. -func copyByteSlice(s []byte) []byte { - res := []byte{} - return append(res, s...) -} diff --git a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go index 30aff4c1e..b8a9b366c 100644 --- a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go +++ b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go @@ -1,6 +1,11 @@ package signature -import "encoding/json" +import ( + "encoding/json" + + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" +) const ( // from sigstore/cosign/pkg/types.SimpleSigningMediaType @@ -40,13 +45,13 @@ type sigstoreJSONRepresentation struct { func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore { return Sigstore{ untrustedMIMEType: untrustedMimeType, - untrustedPayload: copyByteSlice(untrustedPayload), - untrustedAnnotations: copyStringMap(untrustedAnnotations), + untrustedPayload: slices.Clone(untrustedPayload), + untrustedAnnotations: maps.Clone(untrustedAnnotations), } } -// SigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object. -func SigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) { +// sigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object. 
+func sigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) { var v sigstoreJSONRepresentation if err := json.Unmarshal(blobChunk, &v); err != nil { return Sigstore{}, err @@ -74,17 +79,9 @@ func (s Sigstore) UntrustedMIMEType() string { return s.untrustedMIMEType } func (s Sigstore) UntrustedPayload() []byte { - return copyByteSlice(s.untrustedPayload) + return slices.Clone(s.untrustedPayload) } func (s Sigstore) UntrustedAnnotations() map[string]string { - return copyStringMap(s.untrustedAnnotations) -} - -func copyStringMap(m map[string]string) map[string]string { - res := map[string]string{} - for k, v := range m { - res[k] = v - } - return res + return maps.Clone(s.untrustedAnnotations) } diff --git a/vendor/github.com/containers/image/v5/internal/signature/simple.go b/vendor/github.com/containers/image/v5/internal/signature/simple.go index 88b8adad0..c09370406 100644 --- a/vendor/github.com/containers/image/v5/internal/signature/simple.go +++ b/vendor/github.com/containers/image/v5/internal/signature/simple.go @@ -1,5 +1,7 @@ package signature +import "golang.org/x/exp/slices" + // SimpleSigning is a “simple signing” signature. type SimpleSigning struct { untrustedSignature []byte @@ -8,7 +10,7 @@ type SimpleSigning struct { // SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object. func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning { return SimpleSigning{ - untrustedSignature: copyByteSlice(blobChunk), + untrustedSignature: slices.Clone(blobChunk), } } @@ -19,9 +21,9 @@ func (s SimpleSigning) FormatID() FormatID { // blobChunk returns a representation of signature as a []byte, suitable for long-term storage. // Almost everyone should use signature.Blob() instead. func (s SimpleSigning) blobChunk() ([]byte, error) { - return copyByteSlice(s.untrustedSignature), nil + return slices.Clone(s.untrustedSignature), nil } func (s SimpleSigning) UntrustedSignature() []byte { - return copyByteSlice(s.untrustedSignature) + return slices.Clone(s.untrustedSignature) } diff --git a/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go b/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go index 6aa9ead68..b95370af7 100644 --- a/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go +++ b/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go @@ -11,7 +11,7 @@ import ( // The net/http package uses a separate goroutine to upload data to a HTTP connection, // and it is possible for the server to return a response (typically an error) before consuming // the full body of the request. In that case http.Client.Do can return with an error while -// the body is still being read — regardless of of the cancellation, if any, of http.Request.Context(). +// the body is still being read — regardless of the cancellation, if any, of http.Request.Context(). // // As a result, any data used/updated by the io.Reader() provided as the request body may be // used/updated even after http.Client.Do returns, causing races. 
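Stepping back to the internal/signature changes above: the framing that FromBlob now splits with bytes.Cut is a binary 0, the format name, a newline, and the format-specific payload. An illustrative sketch (not part of the patch; the function name and payload are hypothetical) of that split:

func exampleSignatureFraming() {
	// Build a blob in the newer framing: 0x00, format name, '\n', payload.
	blob := append([]byte{0x00}, []byte(string(SimpleSigningFormat)+"\nPAYLOAD")...)
	format, chunk, ok := bytes.Cut(blob[1:], []byte{'\n'}) // cut at the first newline
	_ = ok     // true: a newline separator was present
	_ = format // the format identifier bytes, compared against SimpleSigningFormat/SigstoreFormat
	_ = chunk  // "PAYLOAD", handed to the format-specific parser
}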
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go index 5f352acc2..1bdcf3d30 100644 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -1,7 +1,6 @@ package manifest import ( - "encoding/json" "fmt" compressiontypes "github.com/containers/image/v5/pkg/compression/types" @@ -9,96 +8,6 @@ import ( "github.com/sirupsen/logrus" ) -// dupStringSlice returns a deep copy of a slice of strings, or nil if the -// source slice is empty. -func dupStringSlice(list []string) []string { - if len(list) == 0 { - return nil - } - dup := make([]string, len(list)) - copy(dup, list) - return dup -} - -// dupStringStringMap returns a deep copy of a map[string]string, or nil if the -// passed-in map is nil or has no keys. -func dupStringStringMap(m map[string]string) map[string]string { - if len(m) == 0 { - return nil - } - result := make(map[string]string) - for k, v := range m { - result[k] = v - } - return result -} - -// allowedManifestFields is a bit mask of “essential” manifest fields that validateUnambiguousManifestFormat -// can expect to be present. -type allowedManifestFields int - -const ( - allowedFieldConfig allowedManifestFields = 1 << iota - allowedFieldFSLayers - allowedFieldHistory - allowedFieldLayers - allowedFieldManifests - allowedFieldFirstUnusedBit // Keep this at the end! -) - -// validateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than -// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields -// other than the ones the caller specifically allows. -// expectedMIMEType is used only for diagnostics. -// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format -// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous -// data that just isn’t what was expected, as opposed to actually ambiguous data. -func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string, - allowed allowedManifestFields) error { - if allowed >= allowedFieldFirstUnusedBit { - return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed) - } - // Use a private type to decode, not just a map[string]interface{}, because we want - // to also reject case-insensitive matches (which would be used by Go when really decoding - // the manifest). - // (It is expected that as manifest formats are added or extended over time, more fields will be added - // here.) - detectedFields := struct { - Config interface{} `json:"config"` - FSLayers interface{} `json:"fsLayers"` - History interface{} `json:"history"` - Layers interface{} `json:"layers"` - Manifests interface{} `json:"manifests"` - }{} - if err := json.Unmarshal(manifest, &detectedFields); err != nil { - // The caller was supposed to already validate version numbers, so this should not happen; - // let’s not bother with making this error “nice”. - return err - } - unexpected := []string{} - // Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste. 
- if detectedFields.Config != nil && (allowed&allowedFieldConfig) == 0 { - unexpected = append(unexpected, "config") - } - if detectedFields.FSLayers != nil && (allowed&allowedFieldFSLayers) == 0 { - unexpected = append(unexpected, "fsLayers") - } - if detectedFields.History != nil && (allowed&allowedFieldHistory) == 0 { - unexpected = append(unexpected, "history") - } - if detectedFields.Layers != nil && (allowed&allowedFieldLayers) == 0 { - unexpected = append(unexpected, "layers") - } - if detectedFields.Manifests != nil && (allowed&allowedFieldManifests) == 0 { - unexpected = append(unexpected, "manifests") - } - if len(unexpected) != 0 { - return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`, - unexpected, expectedMIMEType) - } - return nil -} - // layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() // method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. func layerInfosToStrings(infos []LayerInfo) []string { diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go index 8e260c03d..7b9c4b58f 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -8,10 +8,13 @@ import ( "time" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/regexp" "github.com/docker/docker/api/types/versions" "github.com/opencontainers/go-digest" + "golang.org/x/exp/slices" ) // Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. @@ -53,16 +56,16 @@ type Schema1V1Compatibility struct { // Schema1FromManifest creates a Schema1 manifest instance from a manifest blob. // (NOTE: The instance is not necessary a literal representation of the original blob, // layers with duplicate IDs are eliminated.) 
-func Schema1FromManifest(manifest []byte) (*Schema1, error) { +func Schema1FromManifest(manifestBlob []byte) (*Schema1, error) { s1 := Schema1{} - if err := json.Unmarshal(manifest, &s1); err != nil { + if err := json.Unmarshal(manifestBlob, &s1); err != nil { return nil, err } if s1.SchemaVersion != 1 { return nil, fmt.Errorf("unsupported schema version %d", s1.SchemaVersion) } - if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema1SignedMediaType, - allowedFieldFSLayers|allowedFieldHistory); err != nil { + if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema1SignedMediaType, + manifest.AllowedFieldFSLayers|manifest.AllowedFieldHistory); err != nil { return nil, err } if err := s1.initialize(); err != nil { @@ -183,22 +186,22 @@ func (m *Schema1) fixManifestLayers() error { return errors.New("Invalid parent ID in the base layer of the image") } // check general duplicates to error instead of a deadlock - idmap := make(map[string]struct{}) + idmap := set.New[string]() var lastID string for _, img := range m.ExtractedV1Compatibility { // skip IDs that appear after each other, we handle those later - if _, exists := idmap[img.ID]; img.ID != lastID && exists { + if img.ID != lastID && idmap.Contains(img.ID) { return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) } lastID = img.ID - idmap[lastID] = struct{}{} + idmap.Add(lastID) } // backwards loop so that we keep the remaining indexes after removing items for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- { if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue - m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) - m.History = append(m.History[:i], m.History[i+1:]...) - m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...) + m.FSLayers = slices.Delete(m.FSLayers, i, i+1) + m.History = slices.Delete(m.History, i, i+1) + m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1) } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { return fmt.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) } diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go index d9eca043b..3c9745dde 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/containers/image/v5/internal/manifest" compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/pkg/strslice" "github.com/containers/image/v5/types" @@ -12,12 +13,7 @@ import ( ) // Schema2Descriptor is a “descriptor” in docker/distribution schema 2. -type Schema2Descriptor struct { - MediaType string `json:"mediaType"` - Size int64 `json:"size"` - Digest digest.Digest `json:"digest"` - URLs []string `json:"urls,omitempty"` -} +type Schema2Descriptor = manifest.Schema2Descriptor // BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor. func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo { @@ -159,13 +155,13 @@ type Schema2Image struct { } // Schema2FromManifest creates a Schema2 manifest instance from a manifest blob. 
-func Schema2FromManifest(manifest []byte) (*Schema2, error) { +func Schema2FromManifest(manifestBlob []byte) (*Schema2, error) { s2 := Schema2{} - if err := json.Unmarshal(manifest, &s2); err != nil { + if err := json.Unmarshal(manifestBlob, &s2); err != nil { return nil, err } - if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema2MediaType, - allowedFieldConfig|allowedFieldLayers); err != nil { + if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema2MediaType, + manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil { return nil, err } // Check manifest's and layers' media types. diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go index 7e4cc5183..c958a3fa3 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go @@ -1,220 +1,32 @@ package manifest import ( - "encoding/json" - "fmt" - - platform "github.com/containers/image/v5/internal/pkg/platform" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/containers/image/v5/internal/manifest" ) // Schema2PlatformSpec describes the platform which a particular manifest is // specialized for. -type Schema2PlatformSpec struct { - Architecture string `json:"architecture"` - OS string `json:"os"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - Variant string `json:"variant,omitempty"` - Features []string `json:"features,omitempty"` // removed in OCI -} +type Schema2PlatformSpec = manifest.Schema2PlatformSpec // Schema2ManifestDescriptor references a platform-specific manifest. -type Schema2ManifestDescriptor struct { - Schema2Descriptor - Platform Schema2PlatformSpec `json:"platform"` -} +type Schema2ManifestDescriptor = manifest.Schema2ManifestDescriptor // Schema2List is a list of platform-specific manifests. -type Schema2List struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - Manifests []Schema2ManifestDescriptor `json:"manifests"` -} - -// MIMEType returns the MIME type of this particular manifest list. -func (list *Schema2List) MIMEType() string { - return list.MediaType -} - -// Instances returns a slice of digests of the manifests that this list knows of. -func (list *Schema2List) Instances() []digest.Digest { - results := make([]digest.Digest, len(list.Manifests)) - for i, m := range list.Manifests { - results[i] = m.Digest - } - return results -} - -// Instance returns the ListUpdate of a particular instance in the list. -func (list *Schema2List) Instance(instanceDigest digest.Digest) (ListUpdate, error) { - for _, manifest := range list.Manifests { - if manifest.Digest == instanceDigest { - return ListUpdate{ - Digest: manifest.Digest, - Size: manifest.Size, - MediaType: manifest.MediaType, - }, nil - } - } - return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest) -} - -// UpdateInstances updates the sizes, digests, and media types of the manifests -// which the list catalogs. 
-func (list *Schema2List) UpdateInstances(updates []ListUpdate) error { - if len(updates) != len(list.Manifests) { - return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates)) - } - for i := range updates { - if err := updates[i].Digest.Validate(); err != nil { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err) - } - list.Manifests[i].Digest = updates[i].Digest - if updates[i].Size < 0 { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) - } - list.Manifests[i].Size = updates[i].Size - if updates[i].MediaType == "" { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType) - } - list.Manifests[i].MediaType = updates[i].MediaType - } - return nil -} - -// ChooseInstance parses blob as a schema2 manifest list, and returns the digest -// of the image which is appropriate for the current environment. -func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { - wantedPlatforms, err := platform.WantedPlatforms(ctx) - if err != nil { - return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) - } - for _, wantedPlatform := range wantedPlatforms { - for _, d := range list.Manifests { - imagePlatform := imgspecv1.Platform{ - Architecture: d.Platform.Architecture, - OS: d.Platform.OS, - OSVersion: d.Platform.OSVersion, - OSFeatures: dupStringSlice(d.Platform.OSFeatures), - Variant: d.Platform.Variant, - } - if platform.MatchesPlatform(imagePlatform, wantedPlatform) { - return d.Digest, nil - } - } - } - return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) -} - -// Serialize returns the list in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (list *Schema2List) Serialize() ([]byte, error) { - buf, err := json.Marshal(list) - if err != nil { - return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err) - } - return buf, nil -} +type Schema2List = manifest.Schema2ListPublic // Schema2ListFromComponents creates a Schema2 manifest list instance from the // supplied data. func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List { - list := Schema2List{ - SchemaVersion: 2, - MediaType: DockerV2ListMediaType, - Manifests: make([]Schema2ManifestDescriptor, len(components)), - } - for i, component := range components { - m := Schema2ManifestDescriptor{ - Schema2Descriptor{ - MediaType: component.MediaType, - Size: component.Size, - Digest: component.Digest, - URLs: dupStringSlice(component.URLs), - }, - Schema2PlatformSpec{ - Architecture: component.Platform.Architecture, - OS: component.Platform.OS, - OSVersion: component.Platform.OSVersion, - OSFeatures: dupStringSlice(component.Platform.OSFeatures), - Variant: component.Platform.Variant, - Features: dupStringSlice(component.Platform.Features), - }, - } - list.Manifests[i] = m - } - return &list + return manifest.Schema2ListPublicFromComponents(components) } // Schema2ListClone creates a deep copy of the passed-in list. 
func Schema2ListClone(list *Schema2List) *Schema2List { - return Schema2ListFromComponents(list.Manifests) -} - -// ToOCI1Index returns the list encoded as an OCI1 index. -func (list *Schema2List) ToOCI1Index() (*OCI1Index, error) { - components := make([]imgspecv1.Descriptor, 0, len(list.Manifests)) - for _, manifest := range list.Manifests { - converted := imgspecv1.Descriptor{ - MediaType: manifest.MediaType, - Size: manifest.Size, - Digest: manifest.Digest, - URLs: dupStringSlice(manifest.URLs), - Platform: &imgspecv1.Platform{ - OS: manifest.Platform.OS, - Architecture: manifest.Platform.Architecture, - OSFeatures: dupStringSlice(manifest.Platform.OSFeatures), - OSVersion: manifest.Platform.OSVersion, - Variant: manifest.Platform.Variant, - }, - } - components = append(components, converted) - } - oci := OCI1IndexFromComponents(components, nil) - return oci, nil -} - -// ToSchema2List returns the list encoded as a Schema2 list. -func (list *Schema2List) ToSchema2List() (*Schema2List, error) { - return Schema2ListClone(list), nil + return manifest.Schema2ListPublicClone(list) } // Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled // JSON, presumably generated by encoding a Schema2 manifest list. -func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) { - list := Schema2List{ - Manifests: []Schema2ManifestDescriptor{}, - } - if err := json.Unmarshal(manifest, &list); err != nil { - return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err) - } - if err := validateUnambiguousManifestFormat(manifest, DockerV2ListMediaType, - allowedFieldManifests); err != nil { - return nil, err - } - return &list, nil -} - -// Clone returns a deep copy of this list and its contents. -func (list *Schema2List) Clone() List { - return Schema2ListClone(list) -} - -// ConvertToMIMEType converts the passed-in manifest list to a manifest -// list of the specified type. -func (list *Schema2List) ConvertToMIMEType(manifestMIMEType string) (List, error) { - switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { - case DockerV2ListMediaType: - return list.Clone(), nil - case imgspecv1.MediaTypeImageIndex: - return list.ToOCI1Index() - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: - return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType) - default: - // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. - return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType) - } +func Schema2ListFromManifest(manifestBlob []byte) (*Schema2List, error) { + return manifest.Schema2ListPublicFromManifest(manifestBlob) } diff --git a/vendor/github.com/containers/image/v5/manifest/list.go b/vendor/github.com/containers/image/v5/manifest/list.go index 58982597e..1d6fdc9f5 100644 --- a/vendor/github.com/containers/image/v5/manifest/list.go +++ b/vendor/github.com/containers/image/v5/manifest/list.go @@ -1,10 +1,7 @@ package manifest import ( - "fmt" - - "github.com/containers/image/v5/types" - digest "github.com/opencontainers/go-digest" + "github.com/containers/image/v5/internal/manifest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -21,56 +18,14 @@ var ( // Callers can either use this abstract interface without understanding the details of the formats, // or instantiate a specific implementation (e.g. 
manifest.OCI1Index) and access the public members // directly. -type List interface { - // MIMEType returns the MIME type of this particular manifest list. - MIMEType() string - - // Instances returns a list of the manifests that this list knows of, other than its own. - Instances() []digest.Digest - - // Update information about the list's instances. The length of the passed-in slice must - // match the length of the list of instances which the list already contains, and every field - // must be specified. - UpdateInstances([]ListUpdate) error - - // Instance returns the size and MIME type of a particular instance in the list. - Instance(digest.Digest) (ListUpdate, error) - - // ChooseInstance selects which manifest is most appropriate for the platform described by the - // SystemContext, or for the current platform if the SystemContext doesn't specify any details. - ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) - - // Serialize returns the list in a blob format. - // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded - // from, even if no modifications were made! - Serialize() ([]byte, error) - - // ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error. - ConvertToMIMEType(mimeType string) (List, error) - - // Clone returns a deep copy of this list and its contents. - Clone() List -} +type List = manifest.ListPublic // ListUpdate includes the fields which a List's UpdateInstances() method will modify. -type ListUpdate struct { - Digest digest.Digest - Size int64 - MediaType string -} +type ListUpdate = manifest.ListUpdate // ListFromBlob parses a list of manifests. -func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) { - normalized := NormalizedMIMEType(manifestMIMEType) - switch normalized { - case DockerV2ListMediaType: - return Schema2ListFromManifest(manifest) - case imgspecv1.MediaTypeImageIndex: - return OCI1IndexFromManifest(manifest) - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: - return nil, fmt.Errorf("Treating single images as manifest lists is not implemented") - } - return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized) +func ListFromBlob(manifestBlob []byte, manifestMIMEType string) (List, error) { + return manifest.ListPublicFromBlob(manifestBlob, manifestMIMEType) } // ConvertListToMIMEType converts the passed-in manifest list to a manifest diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go index 53fc866a7..959aac935 100644 --- a/vendor/github.com/containers/image/v5/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/manifest/manifest.go @@ -1,10 +1,9 @@ package manifest import ( - "encoding/json" "fmt" - internalManifest "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/types" "github.com/containers/libtrust" digest "github.com/opencontainers/go-digest" @@ -16,28 +15,28 @@ import ( // FIXME(runcom, mitr): should we have a mediatype pkg?? 
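(The hunks above and below repeat one refactoring pattern across the whole manifest package: the concrete implementations move into internal/manifest, and the public package keeps source compatibility by re-exporting them as type aliases, e.g. type List = manifest.ListPublic and type ListUpdate = manifest.ListUpdate, plus the constants below. A minimal, self-contained sketch of why the alias form matters; the names here are illustrative stand-ins, not the vendored identifiers:

package main

import "fmt"

// listUpdateImpl stands in for the implementation type that now lives in an
// internal package (internal/manifest.ListUpdate in this patch).
type listUpdateImpl struct {
	Digest    string
	Size      int64
	MediaType string
}

// ListUpdate is a type alias ("=", not a new defined type), so the public
// name and the internal name denote the same type: existing callers keep
// compiling, and values cross the package boundary without conversion.
type ListUpdate = listUpdateImpl

func main() {
	var u ListUpdate = listUpdateImpl{Size: 42, MediaType: "application/example+json"}
	fmt.Println(u.MediaType, u.Size)
}

Had these been new defined types instead of aliases, every caller of the public package would have needed explicit conversions.)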
 const (
 	// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
-	DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
+	DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType
 	// DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
-	DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+	DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType
 	// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
-	DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
+	DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType
 	// DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
-	DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
+	DockerV2Schema2ConfigMediaType = manifest.DockerV2Schema2ConfigMediaType
 	// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
-	DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+	DockerV2Schema2LayerMediaType = manifest.DockerV2Schema2LayerMediaType
 	// DockerV2SchemaLayerMediaTypeUncompressed is the media type used for uncompressed layers.
-	DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
+	DockerV2SchemaLayerMediaTypeUncompressed = manifest.DockerV2SchemaLayerMediaTypeUncompressed
 	// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
-	DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
+	DockerV2ListMediaType = manifest.DockerV2ListMediaType
 	// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
-	DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
+	DockerV2Schema2ForeignLayerMediaType = manifest.DockerV2Schema2ForeignLayerMediaType
 	// DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
-	DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+	DockerV2Schema2ForeignLayerMediaTypeGzip = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
 )
 
 // NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
 // on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
-type NonImageArtifactError = internalManifest.NonImageArtifactError
+type NonImageArtifactError = manifest.NonImageArtifactError
 
 // SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
 func SupportedSchema2MediaType(m string) error {
@@ -102,102 +101,21 @@ type LayerInfo struct {
 }
 
 // GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
 // FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
 // but we may not have such metadata available (e.g. when the manifest is a local file).
-func GuessMIMEType(manifest []byte) string {
-	// A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
-	// Also docker/distribution/manifest.Versioned.
-	meta := struct {
-		MediaType     string      `json:"mediaType"`
-		SchemaVersion int         `json:"schemaVersion"`
-		Signatures    interface{} `json:"signatures"`
-	}{}
-	if err := json.Unmarshal(manifest, &meta); err != nil {
-		return ""
-	}
-
-	switch meta.MediaType {
-	case DockerV2Schema2MediaType, DockerV2ListMediaType,
-		imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
-		return meta.MediaType
-	}
-	// this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
-	switch meta.SchemaVersion {
-	case 1:
-		if meta.Signatures != nil {
-			return DockerV2Schema1SignedMediaType
-		}
-		return DockerV2Schema1MediaType
-	case 2:
-		// Best effort to understand if this is an OCI image since mediaType
-		// wasn't in the manifest for OCI image-spec < 1.0.2.
-		// For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
-		ociMan := struct {
-			Config struct {
-				MediaType string `json:"mediaType"`
-			} `json:"config"`
-		}{}
-		if err := json.Unmarshal(manifest, &ociMan); err != nil {
-			return ""
-		}
-		switch ociMan.Config.MediaType {
-		case imgspecv1.MediaTypeImageConfig:
-			return imgspecv1.MediaTypeImageManifest
-		case DockerV2Schema2ConfigMediaType:
-			// This case should not happen since a Docker image
-			// must declare a top-level media type and
-			// `meta.MediaType` has already been checked.
-			return DockerV2Schema2MediaType
-		}
-		// Maybe an image index or an OCI artifact.
-		ociIndex := struct {
-			Manifests []imgspecv1.Descriptor `json:"manifests"`
-		}{}
-		if err := json.Unmarshal(manifest, &ociIndex); err != nil {
-			return ""
-		}
-		if len(ociIndex.Manifests) != 0 {
-			if ociMan.Config.MediaType == "" {
-				return imgspecv1.MediaTypeImageIndex
-			}
-			// FIXME: this is mixing media types of manifests and configs.
-			return ociMan.Config.MediaType
-		}
-		// It's most likely an OCI artifact with a custom config media
-		// type which is not (and cannot be) covered by the media-type
-		// checks above.
-		return imgspecv1.MediaTypeImageManifest
-	}
-	return ""
+func GuessMIMEType(manifestBlob []byte) string {
+	return manifest.GuessMIMEType(manifestBlob)
 }
 
 // Digest returns a digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
-func Digest(manifest []byte) (digest.Digest, error) {
-	if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
-		sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
-		if err != nil {
-			return "", err
-		}
-		manifest, err = sig.Payload()
-		if err != nil {
-			// Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
-			// that libtrust itself has josebase64UrlEncode()d
-			return "", err
-		}
-	}
-
-	return digest.FromBytes(manifest), nil
+func Digest(manifestBlob []byte) (digest.Digest, error) {
+	return manifest.Digest(manifestBlob)
 }
 
 // MatchesDigest returns true iff the manifest matches expectedDigest.
 // Error may be set if this returns false.
 // Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
 // or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
-func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
-	// This should eventually support various digest types.
-	actualDigest, err := Digest(manifest)
-	if err != nil {
-		return false, err
-	}
-	return expectedDigest == actualDigest, nil
+func MatchesDigest(manifestBlob []byte, expectedDigest digest.Digest) (bool, error) {
+	return manifest.MatchesDigest(manifestBlob, expectedDigest)
 }
 
 // AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
@@ -231,30 +149,7 @@ func MIMETypeSupportsEncryption(mimeType string) bool {
 // NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
 // centralizing various workarounds.
 func NormalizedMIMEType(input string) string {
-	switch input {
-	// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
-	// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
-	// need to happen within the ImageSource.
-	case "application/json":
-		return DockerV2Schema1SignedMediaType
-	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
-		imgspecv1.MediaTypeImageManifest,
-		imgspecv1.MediaTypeImageIndex,
-		DockerV2Schema2MediaType,
-		DockerV2ListMediaType:
-		return input
-	default:
-		// If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
-		// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
-		// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
-		//
-		// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
-		// This makes no real sense, but it happens
-		// because requests for manifests are
-		// redirected to a content distribution
-		// network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
-		return DockerV2Schema1SignedMediaType
-	}
+	return manifest.NormalizedMIMEType(input)
 }
 
 // FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type
diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go
index 2c52423d9..eb2354768 100644
--- a/vendor/github.com/containers/image/v5/manifest/oci.go
+++ b/vendor/github.com/containers/image/v5/manifest/oci.go
@@ -5,13 +5,14 @@ import (
 	"fmt"
 	"strings"
 
-	internalManifest "github.com/containers/image/v5/internal/manifest"
+	"github.com/containers/image/v5/internal/manifest"
 	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
 	ociencspec "github.com/containers/ocicrypt/spec"
 	"github.com/opencontainers/go-digest"
 	"github.com/opencontainers/image-spec/specs-go"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/exp/slices"
 )
 
 // BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
@@ -49,13 +50,13 @@ func SupportedOCI1MediaType(m string) error {
 }
 
 // OCI1FromManifest creates an OCI1 manifest instance from a manifest blob.
-func OCI1FromManifest(manifest []byte) (*OCI1, error) { +func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) { oci1 := OCI1{} - if err := json.Unmarshal(manifest, &oci1); err != nil { + if err := json.Unmarshal(manifestBlob, &oci1); err != nil { return nil, err } - if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex, - allowedFieldConfig|allowedFieldLayers); err != nil { + if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageIndex, + manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil { return nil, err } return &oci1, nil @@ -160,10 +161,8 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { // getEncryptedMediaType will return the mediatype to its encrypted counterpart and return // an error if the mediatype does not support encryption func getEncryptedMediaType(mediatype string) (string, error) { - for _, s := range strings.Split(mediatype, "+")[1:] { - if s == "encrypted" { - return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype) - } + if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") { + return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype) } unsuffixedMediatype := strings.Split(mediatype, "+")[0] switch unsuffixedMediatype { @@ -178,7 +177,7 @@ func getEncryptedMediaType(mediatype string) (string, error) { // an error if the mediatype does not support decryption func getDecryptedMediaType(mediatype string) (string, error) { if !strings.HasSuffix(mediatype, "+encrypted") { - return "", fmt.Errorf("unsupported mediaType to decrypt %v:", mediatype) + return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype) } return strings.TrimSuffix(mediatype, "+encrypted"), nil @@ -197,7 +196,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type // Most software calling this without human intervention is going to expect the values to be realistic and relevant, // and is probably better served by failing; we can always re-visit that later if we fail now, but // if we started returning some data for OCI artifacts now, we couldn’t start failing in this function later. - return nil, internalManifest.NewNonImageArtifactError(m.Config.MediaType) + return nil, manifest.NewNonImageArtifactError(m.Config.MediaType) } config, err := configGetter(m.ConfigInfo()) @@ -248,7 +247,7 @@ func (m *OCI1) ImageID([]digest.Digest) (string, error) { // (The only known caller of ImageID is storage/storageImageDestination.computeID, // which can’t work with non-image artifacts.) 
if m.Config.MediaType != imgspecv1.MediaTypeImageConfig { - return "", internalManifest.NewNonImageArtifactError(m.Config.MediaType) + return "", manifest.NewNonImageArtifactError(m.Config.MediaType) } if err := m.Config.Digest.Validate(); err != nil { diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go index 726207b9d..193b08935 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go @@ -1,232 +1,27 @@ package manifest import ( - "encoding/json" - "fmt" - "runtime" - - platform "github.com/containers/image/v5/internal/pkg/platform" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" - imgspec "github.com/opencontainers/image-spec/specs-go" + "github.com/containers/image/v5/internal/manifest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) // OCI1Index is just an alias for the OCI index type, but one which we can // provide methods for. -type OCI1Index struct { - imgspecv1.Index -} - -// MIMEType returns the MIME type of this particular manifest index. -func (index *OCI1Index) MIMEType() string { - return imgspecv1.MediaTypeImageIndex -} - -// Instances returns a slice of digests of the manifests that this index knows of. -func (index *OCI1Index) Instances() []digest.Digest { - results := make([]digest.Digest, len(index.Manifests)) - for i, m := range index.Manifests { - results[i] = m.Digest - } - return results -} - -// Instance returns the ListUpdate of a particular instance in the index. -func (index *OCI1Index) Instance(instanceDigest digest.Digest) (ListUpdate, error) { - for _, manifest := range index.Manifests { - if manifest.Digest == instanceDigest { - return ListUpdate{ - Digest: manifest.Digest, - Size: manifest.Size, - MediaType: manifest.MediaType, - }, nil - } - } - return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest) -} - -// UpdateInstances updates the sizes, digests, and media types of the manifests -// which the list catalogs. -func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error { - if len(updates) != len(index.Manifests) { - return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates)) - } - for i := range updates { - if err := updates[i].Digest.Validate(); err != nil { - return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err) - } - index.Manifests[i].Digest = updates[i].Digest - if updates[i].Size < 0 { - return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) - } - index.Manifests[i].Size = updates[i].Size - if updates[i].MediaType == "" { - return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType) - } - index.Manifests[i].MediaType = updates[i].MediaType - } - return nil -} - -// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest -// of the image which is appropriate for the current environment. 
-func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { - wantedPlatforms, err := platform.WantedPlatforms(ctx) - if err != nil { - return "", fmt.Errorf("getting platform information %#v: %w", ctx, err) - } - for _, wantedPlatform := range wantedPlatforms { - for _, d := range index.Manifests { - if d.Platform == nil { - continue - } - imagePlatform := imgspecv1.Platform{ - Architecture: d.Platform.Architecture, - OS: d.Platform.OS, - OSVersion: d.Platform.OSVersion, - OSFeatures: dupStringSlice(d.Platform.OSFeatures), - Variant: d.Platform.Variant, - } - if platform.MatchesPlatform(imagePlatform, wantedPlatform) { - return d.Digest, nil - } - } - } - - for _, d := range index.Manifests { - if d.Platform == nil { - return d.Digest, nil - } - } - return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) -} - -// Serialize returns the index in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (index *OCI1Index) Serialize() ([]byte, error) { - buf, err := json.Marshal(index) - if err != nil { - return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err) - } - return buf, nil -} +type OCI1Index = manifest.OCI1IndexPublic // OCI1IndexFromComponents creates an OCI1 image index instance from the // supplied data. func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index { - index := OCI1Index{ - imgspecv1.Index{ - Versioned: imgspec.Versioned{SchemaVersion: 2}, - MediaType: imgspecv1.MediaTypeImageIndex, - Manifests: make([]imgspecv1.Descriptor, len(components)), - Annotations: dupStringStringMap(annotations), - }, - } - for i, component := range components { - var platform *imgspecv1.Platform - if component.Platform != nil { - platform = &imgspecv1.Platform{ - Architecture: component.Platform.Architecture, - OS: component.Platform.OS, - OSVersion: component.Platform.OSVersion, - OSFeatures: dupStringSlice(component.Platform.OSFeatures), - Variant: component.Platform.Variant, - } - } - m := imgspecv1.Descriptor{ - MediaType: component.MediaType, - Size: component.Size, - Digest: component.Digest, - URLs: dupStringSlice(component.URLs), - Annotations: dupStringStringMap(component.Annotations), - Platform: platform, - } - index.Manifests[i] = m - } - return &index + return manifest.OCI1IndexPublicFromComponents(components, annotations) } // OCI1IndexClone creates a deep copy of the passed-in index. func OCI1IndexClone(index *OCI1Index) *OCI1Index { - return OCI1IndexFromComponents(index.Manifests, index.Annotations) -} - -// ToOCI1Index returns the index encoded as an OCI1 index. -func (index *OCI1Index) ToOCI1Index() (*OCI1Index, error) { - return OCI1IndexClone(index), nil -} - -// ToSchema2List returns the index encoded as a Schema2 list. 
-func (index *OCI1Index) ToSchema2List() (*Schema2List, error) { - components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests)) - for _, manifest := range index.Manifests { - platform := manifest.Platform - if platform == nil { - platform = &imgspecv1.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - } - } - converted := Schema2ManifestDescriptor{ - Schema2Descriptor{ - MediaType: manifest.MediaType, - Size: manifest.Size, - Digest: manifest.Digest, - URLs: dupStringSlice(manifest.URLs), - }, - Schema2PlatformSpec{ - OS: platform.OS, - Architecture: platform.Architecture, - OSFeatures: dupStringSlice(platform.OSFeatures), - OSVersion: platform.OSVersion, - Variant: platform.Variant, - }, - } - components = append(components, converted) - } - s2 := Schema2ListFromComponents(components) - return s2, nil + return manifest.OCI1IndexPublicClone(index) } // OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled // JSON, presumably generated by encoding a OCI1 manifest index. -func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) { - index := OCI1Index{ - Index: imgspecv1.Index{ - Versioned: imgspec.Versioned{SchemaVersion: 2}, - MediaType: imgspecv1.MediaTypeImageIndex, - Manifests: []imgspecv1.Descriptor{}, - Annotations: make(map[string]string), - }, - } - if err := json.Unmarshal(manifest, &index); err != nil { - return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err) - } - if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex, - allowedFieldManifests); err != nil { - return nil, err - } - return &index, nil -} - -// Clone returns a deep copy of this list and its contents. -func (index *OCI1Index) Clone() List { - return OCI1IndexClone(index) -} - -// ConvertToMIMEType converts the passed-in image index to a manifest list of -// the specified type. -func (index *OCI1Index) ConvertToMIMEType(manifestMIMEType string) (List, error) { - switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { - case DockerV2ListMediaType: - return index.ToSchema2List() - case imgspecv1.MediaTypeImageIndex: - return index.Clone(), nil - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: - return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType) - default: - // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. - return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType) - } +func OCI1IndexFromManifest(manifestBlob []byte) (*OCI1Index, error) { + return manifest.OCI1IndexPublicFromManifest(manifestBlob) } diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go index f710be10b..8386c47a3 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_dest.go @@ -109,8 +109,8 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool { // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
-func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { return d.unpackedDest.PutBlobWithOptions(ctx, stream, inputInfo, options) } @@ -119,18 +119,16 @@ func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, str // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. -func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) { +func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache) } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may -// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be -// reflected in the manifest that will be written. +// If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
-func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { +func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { return d.unpackedDest.TryReusingBlobWithOptions(ctx, info, options) } diff --git a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go index 148bc12fa..53827b11a 100644 --- a/vendor/github.com/containers/image/v5/oci/internal/oci_util.go +++ b/vendor/github.com/containers/image/v5/oci/internal/oci_util.go @@ -58,13 +58,7 @@ func splitPathAndImageWindows(reference string) (string, string) { } func splitPathAndImageNonWindows(reference string) (string, string) { - sep := strings.SplitN(reference, ":", 2) - path := sep[0] - - var image string - if len(sep) == 2 { - image = sep[1] - } + path, image, _ := strings.Cut(reference, ":") // image is set to "" if there is no ":" return path, image } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index 4face7213..0a9e4eab9 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -12,9 +12,9 @@ import ( "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/imagedestination/stubs" + "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" - "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" @@ -107,11 +107,11 @@ func (d *ociImageDestination) Close() error { // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob") if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } succeeded := false explicitClosed := false @@ -128,14 +128,14 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io. // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). 
 	size, err := io.Copy(blobFile, stream)
 	if err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	blobDigest := digester.Digest()
 	if inputInfo.Size != -1 && size != inputInfo.Size {
-		return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
+		return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
 	}
 	if err := blobFile.Sync(); err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 
 	// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
@@ -144,52 +144,50 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 	// always fails on Windows.
 	if runtime.GOOS != "windows" {
 		if err := blobFile.Chmod(0644); err != nil {
-			return types.BlobInfo{}, err
+			return private.UploadedBlob{}, err
 		}
 	}
 
 	blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
 	if err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	if err := ensureParentDirectoryExists(blobPath); err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 
 	// need to explicitly close the file, since a rename won't otherwise work on Windows
 	blobFile.Close()
 	explicitClosed = true
 	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
-		return types.BlobInfo{}, err
+		return private.UploadedBlob{}, err
 	}
 	succeeded = true
-	return types.BlobInfo{Digest: blobDigest, Size: size}, nil
+	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
 }
 
 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
+// If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { +func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { if info.Digest == "" { - return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest") + return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest") } blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir) if err != nil { - return false, types.BlobInfo{}, err + return false, private.ReusedBlob{}, err } finfo, err := os.Stat(blobPath) if err != nil && os.IsNotExist(err) { - return false, types.BlobInfo{}, nil + return false, private.ReusedBlob{}, nil } if err != nil { - return false, types.BlobInfo{}, err + return false, private.ReusedBlob{}, err } - return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil + return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil } // PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types, diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go index 408af20a4..6b423f3b0 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go @@ -12,8 +12,8 @@ import ( "github.com/containers/image/v5/internal/imagesource/impl" "github.com/containers/image/v5/internal/imagesource/stubs" + "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/tlsclientconfig" "github.com/containers/image/v5/types" "github.com/docker/go-connections/tlsconfig" @@ -94,6 +94,7 @@ func (s *ociImageSource) Reference() types.ImageReference { // Close removes resources associated with an initialized ImageSource, if any. func (s *ociImageSource) Close() error { + s.client.CloseIdleConnections() return nil } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go index 5375d3324..4a4ab9b2c 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -100,7 +100,7 @@ func (ref ociReference) Transport() types.ImageTransport { // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. 
 func (ref ociReference) StringWithinTransport() string {
 	return fmt.Sprintf("%s:%s", ref.dir, ref.image)
diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
index 42e8970a0..0b737f020 100644
--- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
+++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go
@@ -3,11 +3,12 @@ package openshift
 import (
 	"crypto/tls"
 	"crypto/x509"
-	"encoding/json"
+	"encoding/base64"
 	"errors"
 	"fmt"
 	"net"
 	"net/http"
+	"net/netip"
 	"net/url"
 	"os"
 	"path"
@@ -17,10 +18,10 @@ import (
 	"time"
 
 	"github.com/containers/storage/pkg/homedir"
-	"github.com/ghodss/yaml"
 	"github.com/imdario/mergo"
 	"github.com/sirupsen/logrus"
-	"golang.org/x/net/http2"
+	"golang.org/x/exp/slices"
+	"gopkg.in/yaml.v3"
 )
 
 // restTLSClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.TLSClientConfig.
@@ -205,15 +206,12 @@ func (config *directClientConfig) ClientConfig() (*restConfig, error) {
 	if isConfigTransportTLS(*clientConfig) {
 		var err error
 		// REMOVED: Support for interactive fallback.
-		userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo)
-		if err != nil {
-			return nil, err
-		}
+		userAuthPartialConfig := getUserIdentificationPartialConfig(configAuthInfo)
 		if err = mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig); err != nil {
 			return nil, err
 		}
 
-		serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
+		serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configClusterInfo)
 		if err != nil {
 			return nil, err
 		}
@@ -232,7 +230,7 @@ func (config *directClientConfig) ClientConfig() (*restConfig, error) {
 // 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
 // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
 // 3. load the ~/.kubernetes_auth file as a default
-func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) {
+func getServerIdentificationPartialConfig(configClusterInfo clientcmdCluster) (*restConfig, error) {
 	mergedConfig := &restConfig{}
 
 	// configClusterInfo holds the information identifying the server provided by .kubeconfig
@@ -255,7 +253,7 @@
 // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
 // 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
 // 4. if there is not enough information to identify the user, prompt if possible
-func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
+func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) *restConfig {
 	mergedConfig := &restConfig{}
 
 	// blindly overwrite existing values based on precedence
@@ -274,7 +272,7 @@
 	}
 
 	// REMOVED: prompting for missing information.
-	return mergedConfig, nil
+	return mergedConfig
 }
 
 // ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
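(The import changes above replace github.com/ghodss/yaml, which converts YAML to JSON and reuses json struct tags, with direct gopkg.in/yaml.v3 decoding. The hunks below therefore switch every struct tag from json: to yaml:, turn UnmarshalJSON methods into UnmarshalYAML, and add a helper type so base64 fields still decode, since yaml.v3 does not base64-decode a plain !!str into []byte the way encoding/json does. A rough sketch of that pattern, assuming yaml.v3's fallback to encoding.TextUnmarshaler for scalar nodes, which is what the patch's yamlBinaryAsBase64String helper relies on; the field names here are illustrative:

package main

import (
	"encoding/base64"
	"fmt"

	"gopkg.in/yaml.v3"
)

// base64Bytes mirrors the yamlBinaryAsBase64String idea: yaml.v3 hands the
// scalar to UnmarshalText, which base64-decodes it into raw bytes.
type base64Bytes []byte

func (b *base64Bytes) UnmarshalText(text []byte) error {
	out := make([]byte, base64.StdEncoding.DecodedLen(len(text)))
	n, err := base64.StdEncoding.Decode(out, text)
	if err != nil {
		return err
	}
	*b = out[:n]
	return nil
}

type cluster struct {
	Server string      `yaml:"server"`
	CAData base64Bytes `yaml:"certificate-authority-data,omitempty"`
}

func main() {
	doc := "server: https://example.test:6443\ncertificate-authority-data: aGVsbG8=\n"
	var c cluster
	if err := yaml.Unmarshal([]byte(doc), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s %q\n", c.Server, c.CAData) // https://example.test:6443 "hello"
})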
@@ -672,11 +670,7 @@ func load(data []byte) (*clientcmdConfig, error) { return config, nil } // Note: This does absolutely no kind/version checking or conversions. - data, err := yaml.YAMLToJSON(data) - if err != nil { - return nil, err - } - if err := json.Unmarshal(data, config); err != nil { + if err := yaml.Unmarshal(data, config); err != nil { return nil, err } return config, nil @@ -877,11 +871,11 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error noProxyEnv := os.Getenv("NO_PROXY") noProxyRules := strings.Split(noProxyEnv, ",") - cidrs := []*net.IPNet{} + cidrs := []netip.Prefix{} for _, noProxyRule := range noProxyRules { - _, cidr, _ := net.ParseCIDR(noProxyRule) - if cidr != nil { - cidrs = append(cidrs, cidr) + prefix, err := netip.ParsePrefix(noProxyRule) + if err == nil { + cidrs = append(cidrs, prefix) } } @@ -892,7 +886,7 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error return func(req *http.Request) (*url.URL, error) { host := req.URL.Host // for some urls, the Host is already the host, not the host:port - if net.ParseIP(host) == nil { + if _, err := netip.ParseAddr(host); err != nil { var err error host, _, err = net.SplitHostPort(req.URL.Host) if err != nil { @@ -900,15 +894,15 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error } } - ip := net.ParseIP(host) - if ip == nil { + ip, err := netip.ParseAddr(host) + if err != nil { return delegate(req) } - for _, cidr := range cidrs { - if cidr.Contains(ip) { - return nil, nil - } + if slices.ContainsFunc(cidrs, func(cidr netip.Prefix) bool { + return cidr.Contains(ip) + }) { + return nil, nil } return delegate(req) @@ -936,14 +930,14 @@ func tlsCacheGet(config *restConfig) (http.RoundTripper, error) { Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment), TLSHandshakeTimeout: 10 * time.Second, TLSClientConfig: tlsConfig, - Dial: (&net.Dialer{ + DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, - }).Dial, + }).DialContext, } // Allow clients to disable http2 if needed. 
if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 { - _ = http2.ConfigureTransport(t) + t.ForceAttemptHTTP2 = true } return t, nil } @@ -1057,20 +1051,20 @@ func (c *restConfig) HasCertAuth() bool { // IMPORTANT if you add fields to this struct, please update IsConfigEmpty() type clientcmdConfig struct { // Clusters is a map of referenceable names to cluster configs - Clusters clustersMap `json:"clusters"` + Clusters clustersMap `yaml:"clusters"` // AuthInfos is a map of referenceable names to user configs - AuthInfos authInfosMap `json:"users"` + AuthInfos authInfosMap `yaml:"users"` // Contexts is a map of referenceable names to context configs - Contexts contextsMap `json:"contexts"` + Contexts contextsMap `yaml:"contexts"` // CurrentContext is the name of the context that you would like to use by default - CurrentContext string `json:"current-context"` + CurrentContext string `yaml:"current-context"` } type clustersMap map[string]*clientcmdCluster -func (m *clustersMap) UnmarshalJSON(data []byte) error { +func (m *clustersMap) UnmarshalYAML(value *yaml.Node) error { var a []v1NamedCluster - if err := json.Unmarshal(data, &a); err != nil { + if err := value.Decode(&a); err != nil { return err } for _, e := range a { @@ -1082,9 +1076,9 @@ func (m *clustersMap) UnmarshalJSON(data []byte) error { type authInfosMap map[string]*clientcmdAuthInfo -func (m *authInfosMap) UnmarshalJSON(data []byte) error { +func (m *authInfosMap) UnmarshalYAML(value *yaml.Node) error { var a []v1NamedAuthInfo - if err := json.Unmarshal(data, &a); err != nil { + if err := value.Decode(&a); err != nil { return err } for _, e := range a { @@ -1096,9 +1090,9 @@ func (m *authInfosMap) UnmarshalJSON(data []byte) error { type contextsMap map[string]*clientcmdContext -func (m *contextsMap) UnmarshalJSON(data []byte) error { +func (m *contextsMap) UnmarshalYAML(value *yaml.Node) error { var a []v1NamedContext - if err := json.Unmarshal(data, &a); err != nil { + if err := value.Decode(&a); err != nil { return err } for _, e := range a { @@ -1118,19 +1112,32 @@ func clientcmdNewConfig() *clientcmdConfig { } } +// yamlBinaryAsBase64String is a []byte that can be stored in yaml as a !!str, not a !!binary +type yamlBinaryAsBase64String []byte + +func (bin *yamlBinaryAsBase64String) UnmarshalText(text []byte) error { + res := make([]byte, base64.StdEncoding.DecodedLen(len(text))) + n, err := base64.StdEncoding.Decode(res, text) + if err != nil { + return err + } + *bin = res[:n] + return nil +} + // clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster. // Cluster contains information about how to communicate with a kubernetes cluster type clientcmdCluster struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. LocationOfOrigin string // Server is the address of the kubernetes cluster (https://hostname:port). - Server string `json:"server"` + Server string `yaml:"server"` // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. - InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + InsecureSkipTLSVerify bool `yaml:"insecure-skip-tls-verify,omitempty"` // CertificateAuthority is the path to a cert file for the certificate authority. 
- CertificateAuthority string `json:"certificate-authority,omitempty"` + CertificateAuthority string `yaml:"certificate-authority,omitempty"` // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority - CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` + CertificateAuthorityData yamlBinaryAsBase64String `yaml:"certificate-authority-data,omitempty"` } // clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo. @@ -1139,19 +1146,19 @@ type clientcmdAuthInfo struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. LocationOfOrigin string // ClientCertificate is the path to a client cert file for TLS. - ClientCertificate string `json:"client-certificate,omitempty"` + ClientCertificate string `yaml:"client-certificate,omitempty"` // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate - ClientCertificateData []byte `json:"client-certificate-data,omitempty"` + ClientCertificateData yamlBinaryAsBase64String `yaml:"client-certificate-data,omitempty"` // ClientKey is the path to a client key file for TLS. - ClientKey string `json:"client-key,omitempty"` + ClientKey string `yaml:"client-key,omitempty"` // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey - ClientKeyData []byte `json:"client-key-data,omitempty"` + ClientKeyData yamlBinaryAsBase64String `yaml:"client-key-data,omitempty"` // Token is the bearer token for authentication to the kubernetes cluster. - Token string `json:"token,omitempty"` + Token string `yaml:"token,omitempty"` // Username is the username for basic authentication to the kubernetes cluster. - Username string `json:"username,omitempty"` + Username string `yaml:"username,omitempty"` // Password is the password for basic authentication to the kubernetes cluster. - Password string `json:"password,omitempty"` + Password string `yaml:"password,omitempty"` } // clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context. @@ -1160,36 +1167,36 @@ type clientcmdContext struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. LocationOfOrigin string // Cluster is the name of the cluster for this context - Cluster string `json:"cluster"` + Cluster string `yaml:"cluster"` // AuthInfo is the name of the authInfo for this context - AuthInfo string `json:"user"` + AuthInfo string `yaml:"user"` // Namespace is the default namespace to use on unspecified requests - Namespace string `json:"namespace,omitempty"` + Namespace string `yaml:"namespace,omitempty"` } // v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster. // NamedCluster relates nicknames to cluster information type v1NamedCluster struct { // Name is the nickname for this Cluster - Name string `json:"name"` + Name string `yaml:"name"` // Cluster holds the cluster information - Cluster clientcmdCluster `json:"cluster"` + Cluster clientcmdCluster `yaml:"cluster"` } // v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext. 
// NamedContext relates nicknames to context information type v1NamedContext struct { // Name is the nickname for this Context - Name string `json:"name"` + Name string `yaml:"name"` // Context holds the context information - Context clientcmdContext `json:"context"` + Context clientcmdContext `yaml:"context"` } // v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo. // NamedAuthInfo relates nicknames to auth information type v1NamedAuthInfo struct { // Name is the nickname for this AuthInfo - Name string `json:"name"` + Name string `yaml:"name"` // AuthInfo holds the auth information - AuthInfo clientcmdAuthInfo `json:"user"` + AuthInfo clientcmdAuthInfo `yaml:"user"` } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go index 38f5d531d..f3d5662e6 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -146,11 +146,11 @@ func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName str // convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use; // currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside. func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { - parts := strings.SplitN(ref, "/", 2) - if len(parts) != 2 { + _, repo, gotRepo := strings.Cut(ref, "/") + if !gotRepo { return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref) } - return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil + return reference.Domain(c.ref.dockerReference) + "/" + repo, nil } // These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies. diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go index d5dbaf27e..7b1b5dfcd 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go @@ -17,10 +17,12 @@ import ( "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/imagedestination/stubs" "github.com/containers/image/v5/internal/private" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + "golang.org/x/exp/slices" ) type openshiftImageDestination struct { @@ -114,8 +116,8 @@ func (d *openshiftImageDestination) SupportsPutBlobPartial() bool { // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
-func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. +func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { return d.docker.PutBlobWithOptions(ctx, stream, inputInfo, options) } @@ -124,18 +126,16 @@ func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stre // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. -func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) { +func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache) } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. -// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may -// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be -// reflected in the manifest that will be written. +// If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
-func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { +func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { return d.docker.TryReusingBlobWithOptions(ctx, info, options) } @@ -180,12 +180,11 @@ func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context, if err != nil { return err } - existingSigNames := map[string]struct{}{} + existingSigNames := set.New[string]() for _, sig := range image.Signatures { - existingSigNames[sig.objectMeta.Name] = struct{}{} + existingSigNames.Add(sig.objectMeta.Name) } -sigExists: for _, newSigWithFormat := range signatures { newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning) if !ok { @@ -193,10 +192,10 @@ sigExists: } newSig := newSigSimple.UntrustedSignature() - for _, existingSig := range image.Signatures { - if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) { - continue sigExists - } + if slices.ContainsFunc(image.Signatures, func(existingSig imageSignature) bool { + return existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) + }) { + continue } // The API expect us to invent a new unique name. This is racy, but hopefully good enough. @@ -208,7 +207,7 @@ sigExists: return fmt.Errorf("generating random signature len %d: %w", n, err) } signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes) - if _, ok := existingSigNames[signatureName]; !ok { + if !existingSigNames.Contains(signatureName) { break } } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go index 159903944..0ba435d56 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_transport.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_transport.go @@ -89,7 +89,7 @@ func (ref openshiftReference) Transport() types.ImageTransport { // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. 
func (ref openshiftReference) StringWithinTransport() string { return reference.FamiliarString(ref.dockerReference) diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go index 929523fa6..48f3ee5a7 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go @@ -69,7 +69,7 @@ type manifestSchema struct { } type ostreeImageDestination struct { - compat impl.Compat + impl.Compat impl.PropertyMethodsInitialize stubs.NoPutBlobPartialInitialize stubs.AlwaysSupportsSignatures @@ -135,16 +135,16 @@ func (d *ostreeImageDestination) Close() error { // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { +func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob") if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } blobPath := filepath.Join(tmpDir, "content") blobFile, err := os.Create(blobPath) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } defer blobFile.Close() @@ -152,19 +152,19 @@ func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). size, err := io.Copy(blobFile, stream) if err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } blobDigest := digester.Digest() if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) + return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size) } if err := blobFile.Sync(); err != nil { - return types.BlobInfo{}, err + return private.UploadedBlob{}, err } hash := blobDigest.Hex() d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath} - return types.BlobInfo{Digest: blobDigest, Size: size}, nil + return private.UploadedBlob{Digest: blobDigest, Size: size}, nil } func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error { @@ -334,11 +334,11 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be // reflected in the manifest that will be written. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. 
-func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { +func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { if d.repo == nil { repo, err := openRepo(d.ref.repo) if err != nil { - return false, types.BlobInfo{}, err + return false, private.ReusedBlob{}, err } d.repo = repo } @@ -346,25 +346,25 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest") if err != nil || !found { - return found, types.BlobInfo{}, err + return found, private.ReusedBlob{}, err } found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size") if err != nil || !found { - return found, types.BlobInfo{}, err + return found, private.ReusedBlob{}, err } found, data, err = readMetadata(d.repo, branch, "docker.size") if err != nil || !found { - return found, types.BlobInfo{}, err + return found, private.ReusedBlob{}, err } size, err := strconv.ParseInt(data, 10, 64) if err != nil { - return false, types.BlobInfo{}, err + return false, private.ReusedBlob{}, err } - return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil + return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil } // PutManifest writes manifest to the destination. diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go index 14a84414a..d83f85b90 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_transport.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_transport.go @@ -75,12 +75,11 @@ type ostreeImageCloser struct { func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) { var repo = "" - var image = "" - s := strings.SplitN(ref, "@/", 2) - if len(s) == 1 { - image, repo = s[0], defaultOSTreeRepo + image, repoPart, gotRepoPart := strings.Cut(ref, "@/") + if !gotRepoPart { + repo = defaultOSTreeRepo } else { - image, repo = s[0], "/"+s[1] + repo = "/" + repoPart } return NewReference(image, repo) @@ -134,7 +133,7 @@ func (ref ostreeReference) Transport() types.ImageTransport { // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. func (ref ostreeReference) StringWithinTransport() string { return fmt.Sprintf("%s@%s", ref.image, ref.repo) @@ -157,11 +156,11 @@ func (ref ostreeReference) PolicyConfigurationIdentity() string { // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), // and each following element to be a prefix of the element preceding it. 
func (ref ostreeReference) PolicyConfigurationNamespaces() []string { - s := strings.SplitN(ref.image, ":", 2) - if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag. + repo, _, gotTag := strings.Cut(ref.image, ":") + if !gotTag { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag. panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image)) } - name := s[0] + name := repo res := []string{} for { res = append(res, fmt.Sprintf("%s:%s", ref.repo, name)) diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index 426640366..427610fab 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -6,6 +6,7 @@ import ( "time" "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" @@ -19,12 +20,12 @@ type locationKey struct { blobDigest digest.Digest } -// cache implements an in-memory-only BlobInfoCache +// cache implements an in-memory-only BlobInfoCache. type cache struct { mutex sync.Mutex // The following fields can only be accessed with mutex held. uncompressedDigests map[digest.Digest]digest.Digest - digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest + digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown, for each digest } @@ -44,7 +45,7 @@ func New() types.BlobInfoCache { func new2() *cache { return &cache{ uncompressedDigests: map[digest.Digest]digest.Digest{}, - digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{}, + digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{}, knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, compressors: map[digest.Digest]string{}, } @@ -67,7 +68,7 @@ func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Diges // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest. // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings // when we already record a (compressed, uncompressed) pair. - if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 { + if s, ok := mem.digestsByUncompressed[anyDigest]; ok && !s.Empty() { return anyDigest } return "" @@ -88,10 +89,10 @@ func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre anyDigestSet, ok := mem.digestsByUncompressed[uncompressed] if !ok { - anyDigestSet = map[digest.Digest]struct{}{} + anyDigestSet = set.New[digest.Digest]() mem.digestsByUncompressed[uncompressed] = anyDigestSet } - anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again. 
+ anyDigestSet.Add(anyDigest) } // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, @@ -171,10 +172,11 @@ func (mem *cache) candidateLocations(transport types.ImageTransport, scope types var uncompressedDigest digest.Digest // = "" if canSubstitute { if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" { - otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map - for d := range otherDigests { - if d != primaryDigest && d != uncompressedDigest { - res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo) + if otherDigests, ok := mem.digestsByUncompressed[uncompressedDigest]; ok { + for _, d := range otherDigests.Values() { + if d != primaryDigest && d != uncompressedDigest { + res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo) + } } } if uncompressedDigest != primaryDigest { diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go index ce688d117..4443dda7f 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/compression.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go @@ -30,7 +30,7 @@ var ( // Zstd compression. Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName, []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor) - // Zstd:chunked compression. + // ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files. ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */ nil, ZstdDecompressor, compressor.ZstdCompressor) diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go index fb37ca317..ba619be00 100644 --- a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go +++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go @@ -44,21 +44,21 @@ func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string { } // AlgorithmCompressor returns the compressor field of algo. -// This is a function instead of a public method so that it is only callable from by code +// This is a function instead of a public method so that it is only callable by code // that is allowed to import this internal subpackage. func AlgorithmCompressor(algo Algorithm) CompressorFunc { return algo.compressor } // AlgorithmDecompressor returns the decompressor field of algo. -// This is a function instead of a public method so that it is only callable from by code +// This is a function instead of a public method so that it is only callable by code // that is allowed to import this internal subpackage. func AlgorithmDecompressor(algo Algorithm) DecompressorFunc { return algo.decompressor } // AlgorithmPrefix returns the prefix field of algo. -// This is a function instead of a public method so that it is only callable from by code +// This is a function instead of a public method so that it is only callable by code // that is allowed to import this internal subpackage.
func AlgorithmPrefix(algo Algorithm) []byte { return algo.prefix diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index c363cb535..0e3003cec 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -12,6 +12,7 @@ import ( "strings" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/homedir" @@ -139,10 +140,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // possible sources, and then call `GetCredentials` on them. That // prevents us from having to reverse engineer the logic in // `GetCredentials`. - allKeys := make(map[string]bool) - addKey := func(s string) { - allKeys[s] = true - } + allKeys := set.New[string]() // To use GetCredentials, we must at least convert the URL forms into host names. // While we're at it, we’ll also canonicalize docker.io to the standard format. @@ -166,14 +164,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // direct mapping to a registry, so we can just // walk the map. for registry := range auths.CredHelpers { - addKey(registry) + allKeys.Add(registry) } for key := range auths.AuthConfigs { key := normalizeAuthFileKey(key, path.legacyFormat) if key == normalizedDockerIORegistry { key = "docker.io" } - addKey(key) + allKeys.Add(key) } } // External helpers. @@ -188,7 +186,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon } } for registry := range creds { - addKey(registry) + allKeys.Add(registry) } } } @@ -196,7 +194,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon // Now use `GetCredentials` to the specific auth configs for each // previously listed registry. authConfigs := make(map[string]types.DockerAuthConfig) - for key := range allKeys { + for _, key := range allKeys.Values() { authConf, err := GetCredentials(sys, key) if err != nil { // Note: we rely on the logging in `GetCredentials`. 
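The hunks above replace open-coded map[K]struct{} sets (in GetAllCredentials, the signature-name bookkeeping, and the memory blob-info cache) with the new generic internal/set package. As a rough sketch of the surface those call sites rely on (New, Add, Contains, Empty, Values), something along these lines would suffice; the vendored implementation may differ in detail:

package set

import "golang.org/x/exp/maps"

// Set is an unordered generic set backed by a map; only the operations
// exercised by the call sites in this patch are sketched here.
type Set[E comparable] struct {
	m map[E]struct{}
}

func New[E comparable]() *Set[E] { return &Set[E]{m: map[E]struct{}{}} }

func (s *Set[E]) Add(v E)           { s.m[v] = struct{}{} }
func (s *Set[E]) Contains(v E) bool { _, ok := s.m[v]; return ok }
func (s *Set[E]) Empty() bool       { return len(s.m) == 0 }
func (s *Set[E]) Values() []E       { return maps.Keys(s.m) } // iteration order is unspecified

Since Values() inherits Go's randomized map iteration order, callers such as GetAllCredentials still must not depend on ordering.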
@@ -394,17 +392,16 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error { if isNamespaced { logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper) return - } else { - err := deleteAuthFromCredHelper(helper, key) - if err == nil { - logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper) - isLoggedIn = true - return - } - if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { - logrus.Debugf("Not logged in to %s with credential helper %s", key, helper) - return - } + } + err := deleteAuthFromCredHelper(helper, key) + if err == nil { + logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper) + isLoggedIn = true + return + } + if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { + logrus.Debugf("Not logged in to %s with credential helper %s", key, helper) + return } multiErr = multierror.Append(multiErr, fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err)) } @@ -759,8 +756,8 @@ func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuth return types.DockerAuthConfig{}, err } - parts := strings.SplitN(string(decoded), ":", 2) - if len(parts) != 2 { + user, passwordPart, valid := strings.Cut(string(decoded), ":") + if !valid { // if it's invalid just skip, as docker does if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, don’t warn about those logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing semicolon`, key, path) // Don’t include the text of decoded, because that might put secrets into a log. @@ -770,8 +767,7 @@ func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuth return types.DockerAuthConfig{}, nil } - user := parts[0] - password := strings.Trim(parts[1], "\x00") + password := strings.Trim(passwordPart, "\x00") return types.DockerAuthConfig{ Username: user, Password: password, @@ -786,7 +782,7 @@ func normalizeAuthFileKey(key string, legacyFormat bool) string { stripped = strings.TrimPrefix(stripped, "https://") if legacyFormat || stripped != key { - stripped = strings.SplitN(stripped, "/", 2)[0] + stripped, _, _ = strings.Cut(stripped, "/") } return normalizeRegistry(stripped) diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go index 7ebd3fd22..3a11542c6 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go @@ -14,6 +14,7 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/lockfile" "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" ) // defaultShortNameMode is the default mode of registries.conf files if the @@ -308,9 +309,7 @@ func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAl // updateWithConfigurationFrom updates c with configuration from updates. // In case of conflict, updates is preferred. 
func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) { - for name, value := range updates.namedAliases { - c.namedAliases[name] = value - } + maps.Copy(c.namedAliases, updates.namedAliases) } func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) { diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index 463e77028..f45fd9de1 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -16,6 +16,7 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/regexp" "github.com/sirupsen/logrus" + "golang.org/x/exp/maps" ) // systemRegistriesConfPath is the path to the system-wide registry @@ -1019,12 +1020,9 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) { // Go maps have a non-deterministic order when iterating the keys, so // we dump them in a slice and sort it to enforce some order in // Registries slice. Some consumers of c/image (e.g., CRI-O) log the - // the configuration where a non-deterministic order could easily cause + // configuration where a non-deterministic order could easily cause // confusion. - prefixes := []string{} - for prefix := range registryMap { - prefixes = append(prefixes, prefix) - } + prefixes := maps.Keys(registryMap) sort.Strings(prefixes) c.partialV2.Registries = []Registry{} diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go index 285203bad..56b0d4939 100644 --- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go +++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go @@ -12,6 +12,7 @@ import ( "time" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc @@ -80,12 +81,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { } func hasFile(files []os.DirEntry, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false + return slices.ContainsFunc(files, func(f os.DirEntry) bool { + return f.Name() == name + }) } // NewTransport Creates a default transport @@ -93,14 +91,13 @@ func NewTransport() *http.Transport { direct := &net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, - DualStack: true, } tr := &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: direct.DialContext, TLSHandshakeTimeout: 10 * time.Second, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, + IdleConnTimeout: 90 * time.Second, + MaxIdleConns: 100, } return tr } diff --git a/vendor/github.com/containers/image/v5/sif/transport.go b/vendor/github.com/containers/image/v5/sif/transport.go index 2037f2508..4c0901071 100644 --- a/vendor/github.com/containers/image/v5/sif/transport.go +++ b/vendor/github.com/containers/image/v5/sif/transport.go @@ -87,7 +87,7 @@ func (ref sifReference) Transport() types.ImageTransport { // StringWithinTransport returns a string representation of the reference, which MUST be such that // 
reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; // instead, see transports.ImageName(). func (ref sifReference) StringWithinTransport() string { diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go index b09502dfe..d6075f811 100644 --- a/vendor/github.com/containers/image/v5/signature/docker.go +++ b/vendor/github.com/containers/image/v5/signature/docker.go @@ -11,6 +11,7 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/signature/internal" "github.com/opencontainers/go-digest" + "golang.org/x/exp/slices" ) // SignOptions includes optional parameters for signing container images. @@ -50,15 +51,26 @@ func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, // using mech. func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte, expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) { + sig, _, err := VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest, expectedDockerReference, mech, []string{expectedKeyIdentity}) + return sig, err +} + +// VerifyImageManifestSignatureUsingKeyIdentityList checks that unverifiedSignature uses one of the expectedKeyIdentities +// to sign unverifiedManifest as expectedDockerReference, using mech. Returns the verified signature and the key identity that +// was used to verify it. 
+func VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest []byte, + expectedDockerReference string, mech SigningMechanism, expectedKeyIdentities []string) (*Signature, string, error) { expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference) if err != nil { - return nil, err + return nil, "", err } + var matchedKeyIdentity string sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{ validateKeyIdentity: func(keyIdentity string) error { - if keyIdentity != expectedKeyIdentity { - return internal.NewInvalidSignatureError(fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)) + if !slices.Contains(expectedKeyIdentities, keyIdentity) { + return internal.NewInvalidSignatureError(fmt.Sprintf("Signature by %s does not match expected fingerprints %v", keyIdentity, expectedKeyIdentities)) } + matchedKeyIdentity = keyIdentity return nil }, validateSignedDockerReference: func(signedDockerReference string) error { @@ -84,7 +96,7 @@ func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byt }, }) if err != nil { - return nil, err + return nil, "", err } - return sig, nil + return sig, matchedKeyIdentity, err } diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go index 2e7f44ce5..ef5d3df6f 100644 --- a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go +++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go @@ -12,6 +12,7 @@ import ( "github.com/containers/image/v5/signature/internal" "github.com/sigstore/fulcio/pkg/certificate" "github.com/sigstore/sigstore/pkg/cryptoutils" + "golang.org/x/exp/slices" ) // fulcioTrustRoot contains policy allow validating Fulcio-issued certificates. @@ -32,6 +33,58 @@ func (f *fulcioTrustRoot) validate() error { return nil } +// fulcioIssuerInCertificate returns the OIDC issuer recorded by Fulcio in untrustedCertificate; +// it fails if the extension is not present in the certificate, or on any inconsistency. +func fulcioIssuerInCertificate(untrustedCertificate *x509.Certificate) (string, error) { + // == Validate the recorded OIDC issuer + gotOIDCIssuer1 := false + gotOIDCIssuer2 := false + var oidcIssuer1, oidcIssuer2 string + // certificate.ParseExtensions doesn’t reject duplicate extensions, and doesn’t detect inconsistencies + // between certificate.OIDIssuer and certificate.OIDIssuerV2. + // Go 1.19 rejects duplicate extensions universally; but until we can require Go 1.19, + // reject duplicates manually. + for _, untrustedExt := range untrustedCertificate.Extensions { + if untrustedExt.Id.Equal(certificate.OIDIssuer) { //nolint:staticcheck // This is deprecated, but we must continue to accept it. + if gotOIDCIssuer1 { + // Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions + // already in ParseCertificate. + return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v1 extension") + } + oidcIssuer1 = string(untrustedExt.Value) + gotOIDCIssuer1 = true + } else if untrustedExt.Id.Equal(certificate.OIDIssuerV2) { + if gotOIDCIssuer2 { + // Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions + // already in ParseCertificate.
+ return "", internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer v2 extension") + } + rest, err := asn1.Unmarshal(untrustedExt.Value, &oidcIssuer2) + if err != nil { + return "", internal.NewInvalidSignatureError(fmt.Sprintf("invalid ASN.1 in OIDC issuer v2 extension: %v", err)) + } + if len(rest) != 0 { + return "", internal.NewInvalidSignatureError("invalid ASN.1 in OIDC issuer v2 extension, trailing data") + } + gotOIDCIssuer2 = true + } + } + switch { + case gotOIDCIssuer1 && gotOIDCIssuer2: + if oidcIssuer1 != oidcIssuer2 { + return "", internal.NewInvalidSignatureError(fmt.Sprintf("inconsistent OIDC issuer extension values: v1 %#v, v2 %#v", + oidcIssuer1, oidcIssuer2)) + } + return oidcIssuer1, nil + case gotOIDCIssuer1: + return oidcIssuer1, nil + case gotOIDCIssuer2: + return oidcIssuer2, nil + default: + return "", internal.NewInvalidSignatureError("Fulcio certificate is missing the issuer extension") + } +} + func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte) (crypto.PublicKey, error) { // == Verify the certificate is correctly signed var untrustedIntermediatePool *x509.CertPool // = nil @@ -104,47 +157,24 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, // log of approved Fulcio invocations, and it’s not clear where that would come from, especially human users manually // logging in using OpenID are not going to maintain a record of those actions. // - // Also, the SCT does not help reveal _what_ was maliciously signed, nor does it protect against malicous signatures + // Also, the SCT does not help reveal _what_ was maliciously signed, nor does it protect against malicious signatures // by correctly-issued certificates. // // So, pragmatically, the ideal design seem to be to only do signatures from a trusted build system (which is, by definition, // the arbiter of desired vs. malicious signatures) that maintains an audit log of performed signature operations; and that seems to - // make make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary. + // make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary. // == Validate the recorded OIDC issuer - gotOIDCIssuer := false - var oidcIssuer string - // certificate.ParseExtensions doesn’t reject duplicate extensions. - // Go 1.19 rejects duplicate extensions universally; but until we can require Go 1.19, - // reject duplicates manually. With Go 1.19, we could call certificate.ParseExtensions again. - for _, untrustedExt := range untrustedCertificate.Extensions { - if untrustedExt.Id.Equal(certificate.OIDIssuer) { - if gotOIDCIssuer { - // Coverage: This is unreachable in Go ≥1.19, which rejects certificates with duplicate extensions - // already in ParseCertificate. 
- return nil, internal.NewInvalidSignatureError("Fulcio certificate has a duplicate OIDC issuer extension") - } - oidcIssuer = string(untrustedExt.Value) - gotOIDCIssuer = true - } - } - if !gotOIDCIssuer { - return nil, internal.NewInvalidSignatureError("Fulcio certificate is missing the issuer extension") + oidcIssuer, err := fulcioIssuerInCertificate(untrustedCertificate) + if err != nil { + return nil, err } if oidcIssuer != f.oidcIssuer { return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Unexpected Fulcio OIDC issuer %q", oidcIssuer)) } // == Validate the OIDC subject - foundEmail := false - // TO DO: Use slices.Contains after we update to Go 1.18 - for _, certEmail := range untrustedCertificate.EmailAddresses { - if certEmail == f.subjectEmail { - foundEmail = true - break - } - } - if !foundEmail { + if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) { return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %s not found (got %#v)", f.subjectEmail, untrustedCertificate.EmailAddresses)) diff --git a/vendor/github.com/containers/image/v5/signature/internal/json.go b/vendor/github.com/containers/image/v5/signature/internal/json.go index 0f39fe0ad..a9d127e65 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/json.go +++ b/vendor/github.com/containers/image/v5/signature/internal/json.go @@ -5,6 +5,8 @@ import ( "encoding/json" "fmt" "io" + + "github.com/containers/image/v5/internal/set" ) // JSONFormatError is returned when JSON does not match expected format. @@ -20,8 +22,8 @@ func (err JSONFormatError) Error() string { // // The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy, // we could use reflection to automate this. Later? -func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error { - seenKeys := map[string]struct{}{} +func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) error { + seenKeys := set.New[string]() dec := json.NewDecoder(bytes.NewReader(data)) t, err := dec.Token() @@ -45,10 +47,10 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interfa // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state. return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t)) } - if _, ok := seenKeys[key]; ok { + if seenKeys.Contains(key) { return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key)) } - seenKeys[key] = struct{}{} + seenKeys.Add(key) valuePtr := fieldResolver(key) if valuePtr == nil { @@ -68,11 +70,11 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interfa // ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, but failing on the slightest unexpected aspect // (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields // must be present exactly once, and none other fields are accepted. 
-func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error { - seenKeys := map[string]struct{}{} - if err := ParanoidUnmarshalJSONObject(data, func(key string) interface{} { +func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]any) error { + seenKeys := set.New[string]() + if err := ParanoidUnmarshalJSONObject(data, func(key string) any { if valuePtr, ok := exactFields[key]; ok { - seenKeys[key] = struct{}{} + seenKeys.Add(key) return valuePtr } return nil @@ -80,7 +82,7 @@ func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string] return err } for key := range exactFields { - if _, ok := seenKeys[key]; !ok { + if !seenKeys.Contains(key) { return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key)) } } diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go index 27c2c7e63..d439b5f7a 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go @@ -28,7 +28,7 @@ type UntrustedRekorSET struct { } type UntrustedRekorPayload struct { - Body []byte // In cosign, this is an interface{}, but only a string works + Body []byte // In cosign, this is an any, but only a string works IntegratedTime int64 LogIndex int64 LogID string @@ -51,7 +51,7 @@ func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error { // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. // Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error { - return ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "SignedEntryTimestamp": &s.UntrustedSignedEntryTimestamp, "Payload": &s.UntrustedPayload, }) @@ -63,7 +63,7 @@ var _ json.Marshaler = (*UntrustedRekorSET)(nil) // MarshalJSON implements the json.Marshaler interface. func (s UntrustedRekorSET) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ + return json.Marshal(map[string]any{ "SignedEntryTimestamp": s.UntrustedSignedEntryTimestamp, "Payload": s.UntrustedPayload, }) @@ -86,7 +86,7 @@ func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error { // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type. // Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. func (p *UntrustedRekorPayload) strictUnmarshalJSON(data []byte) error { - return ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "body": &p.Body, "integratedTime": &p.IntegratedTime, "logIndex": &p.LogIndex, @@ -100,7 +100,7 @@ var _ json.Marshaler = (*UntrustedRekorPayload)(nil) // MarshalJSON implements the json.Marshaler interface. 
func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ + return json.Marshal(map[string]any{ "body": p.Body, "integratedTime": p.IntegratedTime, "logIndex": p.LogIndex, @@ -159,7 +159,7 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver } hashedRekordV001Bytes, err := json.Marshal(hashedRekord.Spec) if err != nil { - // Coverage: hashedRekord.Spec is an interface{} that was just unmarshaled, + // Coverage: hashedRekord.Spec is an any that was just unmarshaled, // so this should never fail. return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("re-creating hashedrekord spec: %v", err)) } diff --git a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go index afacd8203..f8ec66564 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go +++ b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go @@ -21,14 +21,14 @@ const ( // UntrustedSigstorePayload is a parsed content of a sigstore signature payload (not the full signature) type UntrustedSigstorePayload struct { - UntrustedDockerManifestDigest digest.Digest - UntrustedDockerReference string // FIXME: more precise type? - UntrustedCreatorID *string + untrustedDockerManifestDigest digest.Digest + untrustedDockerReference string // FIXME: more precise type? + untrustedCreatorID *string // This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision, // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds). // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually, // we would add another field, UntrustedTimestampNS int64. - UntrustedTimestamp *int64 + untrustedTimestamp *int64 } // NewUntrustedSigstorePayload returns an UntrustedSigstorePayload object with @@ -39,10 +39,10 @@ func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerRefer creatorID := "containers/image " + version.Version timestamp := time.Now().Unix() return UntrustedSigstorePayload{ - UntrustedDockerManifestDigest: dockerManifestDigest, - UntrustedDockerReference: dockerReference, - UntrustedCreatorID: &creatorID, - UntrustedTimestamp: ×tamp, + untrustedDockerManifestDigest: dockerManifestDigest, + untrustedDockerReference: dockerReference, + untrustedCreatorID: &creatorID, + untrustedTimestamp: ×tamp, } } @@ -52,22 +52,22 @@ var _ json.Marshaler = (*UntrustedSigstorePayload)(nil) // MarshalJSON implements the json.Marshaler interface. 
func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) { - if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" { + if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" { return nil, errors.New("Unexpected empty signature content") } - critical := map[string]interface{}{ + critical := map[string]any{ "type": sigstoreSignatureType, - "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()}, - "identity": map[string]string{"docker-reference": s.UntrustedDockerReference}, + "image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()}, + "identity": map[string]string{"docker-reference": s.untrustedDockerReference}, } - optional := map[string]interface{}{} - if s.UntrustedCreatorID != nil { - optional["creator"] = *s.UntrustedCreatorID + optional := map[string]any{} + if s.untrustedCreatorID != nil { + optional["creator"] = *s.untrustedCreatorID } - if s.UntrustedTimestamp != nil { - optional["timestamp"] = *s.UntrustedTimestamp + if s.untrustedTimestamp != nil { + optional["timestamp"] = *s.untrustedTimestamp } - signature := map[string]interface{}{ + signature := map[string]any{ "critical": critical, "optional": optional, } @@ -92,7 +92,7 @@ func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error { // Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller. func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error { var critical, optional json.RawMessage - if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "critical": &critical, "optional": &optional, }); err != nil { @@ -104,7 +104,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error { var gotCreatorID, gotTimestamp = false, false // /usr/bin/cosign generates "optional": null if there are no user-specified annotations. 
if !bytes.Equal(optional, []byte("null")) { - if err := ParanoidUnmarshalJSONObject(optional, func(key string) interface{} { + if err := ParanoidUnmarshalJSONObject(optional, func(key string) any { switch key { case "creator": gotCreatorID = true @@ -113,7 +113,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error { gotTimestamp = true return ×tamp default: - var ignore interface{} + var ignore any return &ignore } }); err != nil { @@ -121,19 +121,19 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error { } } if gotCreatorID { - s.UntrustedCreatorID = &creatorID + s.untrustedCreatorID = &creatorID } if gotTimestamp { intTimestamp := int64(timestamp) if float64(intTimestamp) != timestamp { return NewInvalidSignatureError("Field optional.timestamp is not is not an integer") } - s.UntrustedTimestamp = &intTimestamp + s.untrustedTimestamp = &intTimestamp } var t string var image, identity json.RawMessage - if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{ + if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{ "type": &t, "image": &image, "identity": &identity, @@ -145,15 +145,15 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error { } var digestString string - if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{ + if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{ "docker-manifest-digest": &digestString, }); err != nil { return err } - s.UntrustedDockerManifestDigest = digest.Digest(digestString) + s.untrustedDockerManifestDigest = digest.Digest(digestString) - return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{ - "docker-reference": &s.UntrustedDockerReference, + return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{ + "docker-reference": &s.untrustedDockerReference, }) } @@ -191,10 +191,10 @@ func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte, if err := json.Unmarshal(unverifiedPayload, &unmatchedPayload); err != nil { return nil, NewInvalidSignatureError(err.Error()) } - if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.UntrustedDockerManifestDigest); err != nil { + if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.untrustedDockerManifestDigest); err != nil { return nil, err } - if err := rules.ValidateSignedDockerReference(unmatchedPayload.UntrustedDockerReference); err != nil { + if err := rules.ValidateSignedDockerReference(unmatchedPayload.untrustedDockerReference); err != nil { return nil, err } // SigstorePayloadAcceptanceRules have accepted this value. 
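The renames in the hunks above (UntrustedDockerManifestDigest → untrustedDockerManifestDigest, and so on) make the payload fields unexported, so other packages can no longer construct or inspect the untrusted values directly; the hand-written MarshalJSON/strictUnmarshalJSON remain the only (de)serialization path. A small standalone sketch of the mechanism, using a hypothetical payload type (not the real one):

package main

import (
	"encoding/json"
	"fmt"
)

// payload mimics the pattern above: the field is unexported, so the default
// marshaler would emit nothing; MarshalJSON alone controls the wire format.
type payload struct {
	untrustedCreatorID string
}

func (p payload) MarshalJSON() ([]byte, error) {
	// Emit the same "creator" key as before the rename, keeping the
	// signature format stable while hiding the field from other packages.
	return json.Marshal(map[string]any{"creator": p.untrustedCreatorID})
}

func main() {
	b, _ := json.Marshal(payload{untrustedCreatorID: "containers/image 5.x"})
	fmt.Println(string(b)) // {"creator":"containers/image 5.x"}
}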
diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go index 5ca4ad713..7eb5cab7d 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config.go @@ -104,7 +104,7 @@ var _ json.Unmarshaler = (*Policy)(nil) func (p *Policy) UnmarshalJSON(data []byte) error { *p = Policy{} transports := policyTransportsMap{} - if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { switch key { case "default": return &p.Default @@ -135,7 +135,7 @@ func (m *policyTransportsMap) UnmarshalJSON(data []byte) error { // We can't unmarshal directly into map values because it is not possible to take an address of a map value. // So, use a temporary map of pointers-to-slices and convert. tmpMap := map[string]*PolicyTransportScopes{} - if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { // transport can be nil transport := transports.Get(key) // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe. @@ -181,7 +181,7 @@ func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error { // We can't unmarshal directly into map values because it is not possible to take an address of a map value. // So, use a temporary map of pointers-to-slices and convert. tmpMap := map[string]*PolicyRequirements{} - if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { // internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe. 
if _, ok := tmpMap[key]; ok { return nil @@ -271,7 +271,7 @@ var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { *pr = prInsecureAcceptAnything{} var tmp prInsecureAcceptAnything - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, }); err != nil { return err @@ -301,7 +301,7 @@ var _ json.Unmarshaler = (*prReject)(nil) func (pr *prReject) UnmarshalJSON(data []byte) error { *pr = prReject{} var tmp prReject - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, }); err != nil { return err @@ -384,7 +384,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error { var tmp prSignedBy var gotKeyPath, gotKeyPaths, gotKeyData = false, false, false var signedIdentity json.RawMessage - if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { switch key { case "type": return &tmp.Type @@ -495,7 +495,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error { *pr = prSignedBaseLayer{} var tmp prSignedBaseLayer var baseLayerIdentity json.RawMessage - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, "baseLayerIdentity": &baseLayerIdentity, }); err != nil { @@ -564,7 +564,7 @@ var _ json.Unmarshaler = (*prmMatchExact)(nil) func (prm *prmMatchExact) UnmarshalJSON(data []byte) error { *prm = prmMatchExact{} var tmp prmMatchExact - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, }); err != nil { return err @@ -594,7 +594,7 @@ var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil) func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error { *prm = prmMatchRepoDigestOrExact{} var tmp prmMatchRepoDigestOrExact - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, }); err != nil { return err @@ -624,7 +624,7 @@ var _ json.Unmarshaler = (*prmMatchRepository)(nil) func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error { *prm = prmMatchRepository{} var tmp prmMatchRepository - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, }); err != nil { return err @@ -664,7 +664,7 @@ var _ json.Unmarshaler = (*prmExactReference)(nil) func (prm *prmExactReference) UnmarshalJSON(data []byte) error { *prm = prmExactReference{} var tmp prmExactReference - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, "dockerReference": &tmp.DockerReference, }); err != nil { @@ -706,7 +706,7 @@ var _ json.Unmarshaler = (*prmExactRepository)(nil) func (prm *prmExactRepository) UnmarshalJSON(data []byte) error { *prm = prmExactRepository{} var tmp prmExactRepository - if err := 
internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, "dockerRepository": &tmp.DockerRepository, }); err != nil { @@ -778,7 +778,7 @@ var _ json.Unmarshaler = (*prmRemapIdentity)(nil) func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error { *prm = prmRemapIdentity{} var tmp prmRemapIdentity - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "type": &tmp.Type, "prefix": &tmp.Prefix, "signedPrefix": &tmp.SignedPrefix, diff --git a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go index eeec6dc3e..d8c6a97f1 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go @@ -147,7 +147,7 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { var gotKeyPath, gotKeyData, gotFulcio, gotRekorPublicKeyPath, gotRekorPublicKeyData bool var fulcio prSigstoreSignedFulcio var signedIdentity json.RawMessage - if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { switch key { case "type": return &tmp.Type @@ -298,7 +298,7 @@ func (f *prSigstoreSignedFulcio) UnmarshalJSON(data []byte) error { *f = prSigstoreSignedFulcio{} var tmp prSigstoreSignedFulcio var gotCAPath, gotCAData, gotOIDCIssuer, gotSubjectEmail bool // = false... - if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { switch key { case "caPath": gotCAPath = true diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go index 533a997b1..4f8d0da38 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go @@ -46,7 +46,7 @@ type PolicyRequirement interface { // - sarRejected if the signature has not been verified; // in that case error must be non-nil, and should be an PolicyRequirementError if evaluation // succeeded but the result was rejection. - // - sarUnknown if if this PolicyRequirement does not deal with signatures. + // - sarUnknown if this PolicyRequirement does not deal with signatures. // NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed. // Returning sarUnknown and a non-nil error value is invalid. 
// WARNING: This makes the signature contents acceptable for further processing, diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go index ef98b8b83..a4187735b 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -12,6 +12,7 @@ import ( "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" digest "github.com/opencontainers/go-digest" + "golang.org/x/exp/slices" ) func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { @@ -67,10 +68,8 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{ validateKeyIdentity: func(keyIdentity string) error { - for _, trustedIdentity := range trustedIdentities { - if keyIdentity == trustedIdentity { - return nil - } + if slices.Contains(trustedIdentities, keyIdentity) { + return nil } // Coverage: We use a private GPG home directory and only import trusted keys, so this should // not be reachable. diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/copied.go b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go index 0233c4cb8..f9c7f6a5e 100644 --- a/vendor/github.com/containers/image/v5/signature/sigstore/copied.go +++ b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go @@ -33,19 +33,21 @@ import ( // limitations under the License. const ( - // from sigstore/cosign/pkg/cosign.sigstorePrivateKeyPemType - sigstorePrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY" + // from sigstore/cosign/pkg/cosign.CosignPrivateKeyPemType. + cosignPrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY" + // from sigstore/cosign/pkg/cosign.SigstorePrivateKeyPemType. + sigstorePrivateKeyPemType = "ENCRYPTED SIGSTORE PRIVATE KEY" ) // from sigstore/cosign/pkg/cosign.loadPrivateKey -// FIXME: Do we need all of these key formats, and all of those +// FIXME: Do we need all of these key formats? func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) { // Decrypt first p, _ := pem.Decode(key) if p == nil { return nil, errors.New("invalid pem block") } - if p.Type != sigstorePrivateKeyPemType { + if p.Type != sigstorePrivateKeyPemType && p.Type != cosignPrivateKeyPemType { return nil, fmt.Errorf("unsupported pem type: %s", p.Type) } @@ -86,7 +88,9 @@ func marshalKeyPair(privateKey crypto.PrivateKey, publicKey crypto.PublicKey, pa // store in PEM format privBytes := pem.EncodeToMemory(&pem.Block{ Bytes: encBytes, - Type: sigstorePrivateKeyPemType, + // Use the older “COSIGN” type name; as of 2023-03-30 cosign’s main branch generates “SIGSTORE” types, + // but a version of cosign that can accept them has not yet been released. + Type: cosignPrivateKeyPemType, }) // Now do the public key diff --git a/vendor/github.com/containers/image/v5/signature/simple.go b/vendor/github.com/containers/image/v5/signature/simple.go index e91f46aba..56b222eda 100644 --- a/vendor/github.com/containers/image/v5/signature/simple.go +++ b/vendor/github.com/containers/image/v5/signature/simple.go @@ -31,14 +31,14 @@ type Signature struct { // untrustedSignature is a parsed content of a signature. 
type untrustedSignature struct { - UntrustedDockerManifestDigest digest.Digest - UntrustedDockerReference string // FIXME: more precise type? - UntrustedCreatorID *string + untrustedDockerManifestDigest digest.Digest + untrustedDockerReference string // FIXME: more precise type? + untrustedCreatorID *string // This is intentionally an int64; the native JSON float64 type would allow representing _some_ sub-second precision, // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds). // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually, // we would add another field, UntrustedTimestampNS int64. - UntrustedTimestamp *int64 + untrustedTimestamp *int64 } // UntrustedSignatureInformation is information available in an untrusted signature. @@ -65,10 +65,10 @@ func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference s creatorID := "atomic " + version.Version timestamp := time.Now().Unix() return untrustedSignature{ - UntrustedDockerManifestDigest: dockerManifestDigest, - UntrustedDockerReference: dockerReference, - UntrustedCreatorID: &creatorID, - UntrustedTimestamp: &timestamp, + untrustedDockerManifestDigest: dockerManifestDigest, + untrustedDockerReference: dockerReference, + untrustedCreatorID: &creatorID, + untrustedTimestamp: &timestamp, } } @@ -78,22 +78,22 @@ var _ json.Marshaler = (*untrustedSignature)(nil) // MarshalJSON implements the json.Marshaler interface. func (s untrustedSignature) MarshalJSON() ([]byte, error) { - if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" { + if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" { return nil, errors.New("Unexpected empty signature content") } - critical := map[string]interface{}{ + critical := map[string]any{ "type": signatureType, - "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()}, - "identity": map[string]string{"docker-reference": s.UntrustedDockerReference}, + "image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()}, + "identity": map[string]string{"docker-reference": s.untrustedDockerReference}, } - optional := map[string]interface{}{} - if s.UntrustedCreatorID != nil { - optional["creator"] = *s.UntrustedCreatorID + optional := map[string]any{} + if s.untrustedCreatorID != nil { + optional["creator"] = *s.untrustedCreatorID } - if s.UntrustedTimestamp != nil { - optional["timestamp"] = *s.UntrustedTimestamp + if s.untrustedTimestamp != nil { + optional["timestamp"] = *s.untrustedTimestamp } - signature := map[string]interface{}{ + signature := map[string]any{ "critical": critical, "optional": optional, } @@ -118,7 +118,7 @@ func (s *untrustedSignature) UnmarshalJSON(data []byte) error { // Splitting it into a separate function allows us to do the internal.JSONFormatError → InvalidSignatureError in a single place, the caller.
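The struct comment above explains why the timestamp is an int64; the rejection of fractional values happens in strictUnmarshalJSON below via an int64 round-trip. A standalone illustration of that check (assumed values, illustration only):

    package main

    import "fmt"

    // integerFromJSONNumber converts a JSON number (which encoding/json hands
    // to us as a float64) into an int64, rejecting fractional values: the
    // conversion round-trips exactly only when v is integral.
    func integerFromJSONNumber(v float64) (int64, error) {
    	i := int64(v)
    	if float64(i) != v {
    		return 0, fmt.Errorf("%v is not an integer", v)
    	}
    	return i, nil
    }

    func main() {
    	fmt.Println(integerFromJSONNumber(1680000000))   // ok
    	fmt.Println(integerFromJSONNumber(1680000000.5)) // error
    }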
func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { var critical, optional json.RawMessage - if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{ "critical": &critical, "optional": &optional, }); err != nil { @@ -128,7 +128,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { var creatorID string var timestamp float64 var gotCreatorID, gotTimestamp = false, false - if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) interface{} { + if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) any { switch key { case "creator": gotCreatorID = true @@ -137,26 +137,26 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { gotTimestamp = true return &timestamp default: - var ignore interface{} + var ignore any return &ignore } }); err != nil { return err } if gotCreatorID { - s.UntrustedCreatorID = &creatorID + s.untrustedCreatorID = &creatorID } if gotTimestamp { intTimestamp := int64(timestamp) if float64(intTimestamp) != timestamp { return internal.NewInvalidSignatureError("Field optional.timestamp is not an integer") } - s.UntrustedTimestamp = &intTimestamp + s.untrustedTimestamp = &intTimestamp } var t string var image, identity json.RawMessage - if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{ "type": &t, "image": &image, "identity": &identity, @@ -168,15 +168,15 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { } var digestString string - if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{ + if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{ "docker-manifest-digest": &digestString, }); err != nil { return err } - s.UntrustedDockerManifestDigest = digest.Digest(digestString) + s.untrustedDockerManifestDigest = digest.Digest(digestString) - return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{ - "docker-reference": &s.UntrustedDockerReference, + return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{ + "docker-reference": &s.untrustedDockerReference, }) } @@ -229,16 +229,16 @@ func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte if err := json.Unmarshal(signed, &unmatchedSignature); err != nil { return nil, internal.NewInvalidSignatureError(err.Error()) } - if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil { + if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.untrustedDockerManifestDigest); err != nil { return nil, err } - if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil { + if err := rules.validateSignedDockerReference(unmatchedSignature.untrustedDockerReference); err != nil { return nil, err } // signatureAcceptanceRules have accepted this value.
return &Signature{ - DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest, - DockerReference: unmatchedSignature.UntrustedDockerReference, + DockerManifestDigest: unmatchedSignature.untrustedDockerManifestDigest, + DockerReference: unmatchedSignature.untrustedDockerReference, }, nil } @@ -269,14 +269,14 @@ func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes [] } var timestamp *time.Time // = nil - if untrustedDecodedContents.UntrustedTimestamp != nil { - ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0) + if untrustedDecodedContents.untrustedTimestamp != nil { + ts := time.Unix(*untrustedDecodedContents.untrustedTimestamp, 0) timestamp = &ts } return &UntrustedSignatureInformation{ - UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest, - UntrustedDockerReference: untrustedDecodedContents.UntrustedDockerReference, - UntrustedCreatorID: untrustedDecodedContents.UntrustedCreatorID, + UntrustedDockerManifestDigest: untrustedDecodedContents.untrustedDockerManifestDigest, + UntrustedDockerReference: untrustedDecodedContents.untrustedDockerReference, + UntrustedCreatorID: untrustedDecodedContents.untrustedCreatorID, UntrustedTimestamp: timestamp, UntrustedShortKeyIdentifier: shortKeyIdentifier, }, nil diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index ae3bfa8fa..576d510cc 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -21,6 +21,7 @@ import ( "github.com/containers/image/v5/internal/imagedestination/stubs" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/internal/tmpdir" "github.com/containers/image/v5/manifest" @@ -34,6 +35,7 @@ import ( digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) var ( @@ -75,13 +77,19 @@ type storageImageDestination struct { indexToStorageID map[int]*string // All accesses to below data are protected by `lock` which is made // *explicit* in the code. 
- blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs - fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes - filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them - currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) - indexToPulledLayerInfo map[int]*manifest.LayerInfo // Mapping from layer (by index) to pulled down blob - blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer - diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs + fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes + filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them + currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) + indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image + blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer + diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output +} + +// addedLayerInfo records data about a layer to use in this image. +type addedLayerInfo struct { + digest digest.Digest + emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. } // newImageDestination sets us up to write a new image, caching blobs in a temporary directory until @@ -109,18 +117,18 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (* HasThreadSafePutBlob: true, }), - imageRef: imageRef, - directory: directory, - signatureses: make(map[digest.Digest][]byte), - blobDiffIDs: make(map[digest.Digest]digest.Digest), - blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer), - fileSizes: make(map[digest.Digest]int64), - filenames: make(map[digest.Digest]string), - SignatureSizes: []int{}, - SignaturesSizes: make(map[digest.Digest][]int), - indexToStorageID: make(map[int]*string), - indexToPulledLayerInfo: make(map[int]*manifest.LayerInfo), - diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput), + imageRef: imageRef, + directory: directory, + signatureses: make(map[digest.Digest][]byte), + blobDiffIDs: make(map[digest.Digest]digest.Digest), + blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer), + fileSizes: make(map[digest.Digest]int64), + filenames: make(map[digest.Digest]string), + SignatureSizes: []int{}, + SignaturesSizes: make(map[digest.Digest][]int), + indexToStorageID: make(map[int]*string), + indexToAddedLayerInfo: make(map[int]addedLayerInfo), + diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput), } dest.Compat = impl.AddCompat(dest) return dest, nil @@ -156,8 +164,8 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string { // WARNING: The contents of stream are being verified on the fly. 
Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) { - info, err := s.putBlobToPendingFile(ctx, stream, blobinfo, &options) +func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { + info, err := s.putBlobToPendingFile(stream, blobinfo, &options) if err != nil { return info, err } @@ -166,21 +174,20 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream return info, nil } - return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) + return info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{ + digest: info.Digest, + emptyLayer: options.EmptyLayer, + }) } // putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file. // The caller must arrange the blob to be eventually committed using s.commitLayer(). -func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) { +func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (private.UploadedBlob, error) { // Stores a layer or data blob in our temporary directory, checking that any information // in the blobinfo matches the incoming data. - errorBlobInfo := types.BlobInfo{ - Digest: "", - Size: -1, - } if blobinfo.Digest != "" { if err := blobinfo.Digest.Validate(); err != nil { - return errorBlobInfo, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err) + return private.UploadedBlob{}, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err) } } @@ -188,7 +195,7 @@ func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stre filename := s.computeNextBlobCacheFile() file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) if err != nil { - return errorBlobInfo, fmt.Errorf("creating temporary file %q: %w", filename, err) + return private.UploadedBlob{}, fmt.Errorf("creating temporary file %q: %w", filename, err) } defer file.Close() counter := ioutils.NewWriteCounter(file) @@ -196,16 +203,16 @@ func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stre digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo) decompressed, err := archive.DecompressStream(stream) if err != nil { - return errorBlobInfo, fmt.Errorf("setting up to decompress blob: %w", err) + return private.UploadedBlob{}, fmt.Errorf("setting up to decompress blob: %w", err) } diffID := digest.Canonical.Digester() // Copy the data to the file. - // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). + // TODO: This can take quite some time, and should ideally be cancellable using context.Context. 
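// A hedged sketch of one way to honor that TODO (not part of this patch, and
// it assumes a context were plumbed back into putBlobToPendingFile): wrap the
// source in a context-aware reader, so io.Copy stops at the next Read once
// the context is cancelled.
//
//	type ctxReader struct {
//		ctx context.Context
//		r   io.Reader
//	}
//
//	func (c *ctxReader) Read(p []byte) (int, error) {
//		if err := c.ctx.Err(); err != nil {
//			return 0, err // cancelled or deadline exceeded
//		}
//		return c.r.Read(p)
//	}
//
//	// _, err = io.Copy(diffID.Hash(), &ctxReader{ctx: ctx, r: decompressed})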
_, err = io.Copy(diffID.Hash(), decompressed) decompressed.Close() if err != nil { - return errorBlobInfo, fmt.Errorf("storing blob to file %q: %w", filename, err) + return private.UploadedBlob{}, fmt.Errorf("storing blob to file %q: %w", filename, err) } // Determine blob properties, and fail if information that we were given about the blob @@ -215,7 +222,7 @@ func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stre if blobSize < 0 { blobSize = counter.Count } else if blobinfo.Size != counter.Count { - return errorBlobInfo, ErrBlobSizeMismatch + return private.UploadedBlob{}, ErrBlobSizeMismatch } // Record information about the blob. @@ -227,10 +234,9 @@ func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stre // This is safe because we have just computed diffID, and blobDigest was either computed // by us, or validated by the caller (usually copy.digestingReader). options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest()) - return types.BlobInfo{ - Digest: blobDigest, - Size: blobSize, - MediaType: blobinfo.MediaType, + return private.UploadedBlob{ + Digest: blobDigest, + Size: blobSize, }, nil } @@ -242,7 +248,7 @@ type zstdFetcher struct { // GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt. func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - var newChunks []private.ImageSourceChunk + newChunks := make([]private.ImageSourceChunk, 0, len(chunks)) for _, v := range chunks { i := private.ImageSourceChunk{ Offset: v.Offset, @@ -263,7 +269,7 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read // It is available only if SupportsPutBlobPartial(). // Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller // should fall back to PutBlobWithOptions. -func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) { +func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) { fetcher := zstdFetcher{ chunkAccessor: chunkAccessor, ctx: ctx, @@ -272,12 +278,12 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher) if err != nil { - return srcInfo, err + return private.UploadedBlob{}, err } out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ) if err != nil { - return srcInfo, err + return private.UploadedBlob{}, err } blobDigest := srcInfo.Digest @@ -289,124 +295,126 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces s.diffOutputs[blobDigest] = out s.lock.Unlock() - return srcInfo, nil + return private.UploadedBlob{ + Digest: blobDigest, + Size: srcInfo.Size, + }, nil } // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. 
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may -// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be -// reflected in the manifest that will be written. +// If the blob has been successfully reused, returns (true, info, nil). // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { - reused, info, err := s.tryReusingBlobAsPending(ctx, blobinfo, &options) +func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { + reused, info, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options) if err != nil || !reused || options.LayerIndex == nil { return reused, info, err } - return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer) + return reused, info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{ + digest: info.Digest, + emptyLayer: options.EmptyLayer, + }) } -// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata. +// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.blobDiffIDs and other metadata. // The caller must arrange the blob to be eventually committed using s.commitLayer(). -func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) { +func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { // lock the entire method as it executes fairly quickly s.lock.Lock() defer s.lock.Unlock() if options.SrcRef != nil { // Check if we have the layer in the underlying additional layer store. - aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String()) + aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(digest, options.SrcRef.String()) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { - return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobinfo.Digest, err) + return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err) } else if err == nil { // Record the uncompressed value so that we can use it to calculate layer IDs. 
- s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest() - s.blobAdditionalLayer[blobinfo.Digest] = aLayer - return true, types.BlobInfo{ - Digest: blobinfo.Digest, - Size: aLayer.CompressedSize(), - MediaType: blobinfo.MediaType, + s.blobDiffIDs[digest] = aLayer.UncompressedDigest() + s.blobAdditionalLayer[digest] = aLayer + return true, private.ReusedBlob{ + Digest: digest, + Size: aLayer.CompressedSize(), }, nil } } - if blobinfo.Digest == "" { - return false, types.BlobInfo{}, errors.New(`Can not check for a blob with unknown digest`) + if digest == "" { + return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`) } - if err := blobinfo.Digest.Validate(); err != nil { - return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err) + if err := digest.Validate(); err != nil { + return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err) } // Check if we've already cached it in a file. - if size, ok := s.fileSizes[blobinfo.Digest]; ok { - return true, types.BlobInfo{ - Digest: blobinfo.Digest, - Size: size, - MediaType: blobinfo.MediaType, + if size, ok := s.fileSizes[digest]; ok { + return true, private.ReusedBlob{ + Digest: digest, + Size: size, }, nil } // Check if we have a wasn't-compressed layer in storage that's based on that blob. - layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(digest) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { - return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, blobinfo.Digest, err) + return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, digest, err) } if len(layers) > 0 { // Save this for completeness. - s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, types.BlobInfo{ - Digest: blobinfo.Digest, - Size: layers[0].UncompressedSize, - MediaType: blobinfo.MediaType, + s.blobDiffIDs[digest] = layers[0].UncompressedDigest + return true, private.ReusedBlob{ + Digest: digest, + Size: layers[0].UncompressedSize, }, nil } // Check if we have a was-compressed layer in storage that's based on that blob. - layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) + layers, err = s.imageRef.transport.store.LayersByCompressedDigest(digest) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { - return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, blobinfo.Digest, err) + return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, digest, err) } if len(layers) > 0 { // Record the uncompressed value so that we can use it to calculate layer IDs. - s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, types.BlobInfo{ - Digest: blobinfo.Digest, - Size: layers[0].CompressedSize, - MediaType: blobinfo.MediaType, + s.blobDiffIDs[digest] = layers[0].UncompressedDigest + return true, private.ReusedBlob{ + Digest: digest, + Size: layers[0].CompressedSize, }, nil } // Does the blob correspond to a known DiffID which we already have available? // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the // uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size. 
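// Taken together, tryReusingBlobAsPending checks candidates in this order:
//  1. the additional layer store, when options.SrcRef is set;
//  2. blobs this destination has already cached in temporary files (s.fileSizes);
//  3. storage layers by uncompressed digest, then by compressed digest;
//  4. finally, the cache-based substitution below, gated by options.CanSubstitute
//     or by the size already being known from the manifest.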
- if options.CanSubstitute || blobinfo.Size != -1 { - if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { + if options.CanSubstitute || size != -1 { + if uncompressedDigest := options.Cache.UncompressedDigest(digest); uncompressedDigest != "" && uncompressedDigest != digest { layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { - return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err) + return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err) } if len(layers) > 0 { - if blobinfo.Size != -1 { - s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, blobinfo, nil + if size != -1 { + s.blobDiffIDs[digest] = layers[0].UncompressedDigest + return true, private.ReusedBlob{ + Digest: digest, + Size: size, + }, nil } if !options.CanSubstitute { - return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo) + return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest) } s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest - return true, types.BlobInfo{ - Digest: uncompressedDigest, - Size: layers[0].UncompressedSize, - MediaType: blobinfo.MediaType, + return true, private.ReusedBlob{ + Digest: uncompressedDigest, + Size: layers[0].UncompressedSize, }, nil } } } // Nope, we don't have it. - return false, types.BlobInfo{}, nil + return false, private.ReusedBlob{}, nil } // computeID computes a recommended image ID based on information we have so far. If @@ -468,10 +476,10 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er return nil, errors.New("blob not found") } -// queueOrCommit queues in the specified blob to be committed to the storage. +// queueOrCommit queues the specified layer to be committed to the storage. // If no other goroutine is already committing layers, the layer and all // subsequent layers (if already queued) will be committed to the storage. -func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.BlobInfo, index int, emptyLayer bool) error { +func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) error { // NOTE: whenever the code below is touched, make sure that all code // paths unlock the lock and to unlock it exactly once. // @@ -491,10 +499,7 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types. // caller is the "worker" routine committing layers. All other routines // can continue pulling and queuing in layers. s.lock.Lock() - s.indexToPulledLayerInfo[index] = &manifest.LayerInfo{ - BlobInfo: blob, - EmptyLayer: emptyLayer, - } + s.indexToAddedLayerInfo[index] = info // We're still waiting for at least one previous/parent layer to be // committed, so there's nothing to do. @@ -503,10 +508,14 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types. return nil } - for info := s.indexToPulledLayerInfo[index]; info != nil; info = s.indexToPulledLayerInfo[index] { + for { + info, ok := s.indexToAddedLayerInfo[index] + if !ok { + break + } s.lock.Unlock() // Note: commitLayer locks on-demand. 
- if err := s.commitLayer(ctx, *info, index); err != nil { + if err := s.commitLayer(index, info, -1); err != nil { return err } s.lock.Lock() @@ -520,13 +529,15 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types. return nil } -// commitLayer commits the specified blob with the given index to the storage. +// commitLayer commits the specified layer with the given index to the storage. +// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs. +// // Note that the previous layer is expected to already be committed. // // Caution: this function must be called without holding `s.lock`. Callers // must guarantee that, at any given time, at most one goroutine may execute // `commitLayer()`. -func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error { +func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) error { // Already committed? Return early. if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted { return nil @@ -541,7 +552,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest } // Carry over the previous ID for empty non-base layers. - if blob.EmptyLayer { + if info.emptyLayer { s.indexToStorageID[index] = &lastLayer return nil } @@ -549,7 +560,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest // Check if there's already a layer with the ID that we'd give to the result of applying // this layer blob to its parent, if it has one, or the blob's hex value otherwise. s.lock.Lock() - diffID, haveDiffID := s.blobDiffIDs[blob.Digest] + diffID, haveDiffID := s.blobDiffIDs[info.digest] s.lock.Unlock() if !haveDiffID { // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), @@ -558,18 +569,21 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest // that relies on using a blob digest that has never been seen by the store had better call // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only // so far we are going to accommodate that (if we should be doing that at all). - logrus.Debugf("looking for diffID for blob %+v", blob.Digest) - // NOTE: use `TryReusingBlob` to prevent recursion. - has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false) + logrus.Debugf("looking for diffID for blob %+v", info.digest) + // Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit. 
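Stepping back, queueOrCommit and commitLayer together implement "commit layers strictly in index order, on whichever goroutine currently owns the lowest uncommitted index". A self-contained sketch of that pattern, with a hypothetical committer type and string standing in for addedLayerInfo:

    package main

    import (
    	"fmt"
    	"sync"
    )

    type committer struct {
    	mu     sync.Mutex
    	queued map[int]string // layers waiting to be committed, by index
    	next   int            // lowest index not yet committed
    }

    // queueOrCommit records info; if index is the next one due, this goroutine
    // becomes the worker and commits every consecutively queued layer.
    func (c *committer) queueOrCommit(index int, info string) error {
    	c.mu.Lock()
    	c.queued[index] = info
    	if index != c.next { // an earlier layer is still missing; just queue
    		c.mu.Unlock()
    		return nil
    	}
    	for {
    		info, ok := c.queued[c.next]
    		if !ok {
    			break
    		}
    		i := c.next
    		c.mu.Unlock() // commit without holding the lock
    		if err := commitLayer(i, info); err != nil {
    			return err
    		}
    		c.mu.Lock()
    		c.next++
    	}
    	c.mu.Unlock()
    	return nil
    }

    func commitLayer(index int, info string) error {
    	fmt.Printf("committing layer %d (%s)\n", index, info)
    	return nil
    }

    func main() {
    	c := &committer{queued: map[int]string{}}
    	_ = c.queueOrCommit(1, "b") // queued; layer 0 not yet seen
    	_ = c.queueOrCommit(0, "a") // commits 0, then the queued 1
    }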
+ has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{ + Cache: none.NoCache, + CanSubstitute: false, + }) if err != nil { - return fmt.Errorf("checking for a layer based on blob %q: %w", blob.Digest.String(), err) + return fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err) } if !has { - return fmt.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String()) + return fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String()) } - diffID, haveDiffID = s.blobDiffIDs[blob.Digest] + diffID, haveDiffID = s.blobDiffIDs[info.digest] if !haveDiffID { - return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String()) + return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", info.digest.String()) } } id := diffID.Hex() @@ -584,7 +598,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest } s.lock.Lock() - diffOutput, ok := s.diffOutputs[blob.Digest] + diffOutput, ok := s.diffOutputs[info.digest] s.lock.Unlock() if ok { layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil) @@ -593,7 +607,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest } // FIXME: what to do with the uncompressed digest? - diffOutput.UncompressedDigest = blob.Digest + diffOutput.UncompressedDigest = info.digest if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil { _ = s.imageRef.transport.store.Delete(layer.ID) @@ -605,7 +619,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest } s.lock.Lock() - al, ok := s.blobAdditionalLayer[blob.Digest] + al, ok := s.blobAdditionalLayer[info.digest] s.lock.Unlock() if ok { layer, err := al.PutAs(id, lastLayer, nil) @@ -620,7 +634,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest // Check if we previously cached a file with that blob's contents. If we didn't, // then we need to read the desired contents from a layer. s.lock.Lock() - filename, ok := s.filenames[blob.Digest] + filename, ok := s.filenames[info.digest] s.lock.Unlock() if !ok { // Try to find the layer with contents matching that blobsum. @@ -629,13 +643,13 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest if err2 == nil && len(layers) > 0 { layer = layers[0].ID } else { - layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) + layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(info.digest) if err2 == nil && len(layers) > 0 { layer = layers[0].ID } } if layer == "" { - return fmt.Errorf("locating layer for blob %q: %w", blob.Digest, err2) + return fmt.Errorf("locating layer for blob %q: %w", info.digest, err2) } // Read the layer's contents. noCompression := archive.Uncompressed @@ -644,7 +658,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest } diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions) if err2 != nil { - return fmt.Errorf("reading layer %q for blob %q: %w", layer, blob.Digest, err2) + return fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2) } // Copy the layer diff to a file. 
Diff() takes a lock that it holds // until the ReadCloser that it returns is closed, and PutLayer() wants @@ -668,7 +682,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest // Make sure that we can find this file later, should we need the layer's // contents again. s.lock.Lock() - s.filenames[blob.Digest] = filename + s.filenames[info.digest] = filename s.lock.Unlock() } // Read the cached blob and use it as a diff. @@ -680,11 +694,11 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest // Build the new layer using the diff, regardless of where it came from. // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{ - OriginalDigest: blob.Digest, + OriginalDigest: info.digest, UncompressedDigest: diffID, }, file) if err != nil && !errors.Is(err, storage.ErrDuplicateID) { - return fmt.Errorf("adding layer with blob %q: %w", blob.Digest, err) + return fmt.Errorf("adding layer with blob %q: %w", info.digest, err) } s.indexToStorageID[index] = &layer.ID @@ -735,7 +749,10 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t // Extract, commit, or find the layers. for i, blob := range layerBlobs { - if err := s.commitLayer(ctx, blob, i); err != nil { + if err := s.commitLayer(i, addedLayerInfo{ + digest: blob.Digest, + emptyLayer: blob.EmptyLayer, + }, blob.Size); err != nil { return err } } @@ -795,14 +812,14 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so // we just need to screen out the ones that are actually layers to get the list of non-layers. 
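The new internal set type used for that screening replaces the classic map[digest.Digest]struct{} idiom. A minimal generic equivalent, sketched here for illustration (the vendored internal/set package may differ in detail):

    package main

    import "fmt"

    // Set is a minimal generic set over any comparable element type.
    type Set[E comparable] struct{ m map[E]struct{} }

    func New[E comparable]() *Set[E]    { return &Set[E]{m: map[E]struct{}{}} }
    func (s *Set[E]) Add(v E)           { s.m[v] = struct{}{} }
    func (s *Set[E]) Delete(v E)        { delete(s.m, v) }
    func (s *Set[E]) Contains(v E) bool { _, ok := s.m[v]; return ok }

    func (s *Set[E]) Values() []E {
    	res := make([]E, 0, len(s.m))
    	for v := range s.m {
    		res = append(res, v)
    	}
    	return res
    }

    func main() {
    	dataBlobs := New[string]()
    	dataBlobs.Add("sha256:aaa")
    	dataBlobs.Add("sha256:bbb")
    	dataBlobs.Delete("sha256:aaa") // screen out a layer blob
    	fmt.Println(dataBlobs.Values())
    }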
- dataBlobs := make(map[digest.Digest]struct{}) + dataBlobs := set.New[digest.Digest]() for blob := range s.filenames { - dataBlobs[blob] = struct{}{} + dataBlobs.Add(blob) } for _, layerBlob := range layerBlobs { - delete(dataBlobs, layerBlob.Digest) + dataBlobs.Delete(layerBlob.Digest) } - for blob := range dataBlobs { + for _, blob := range dataBlobs.Values() { v, err := os.ReadFile(s.filenames[blob]) if err != nil { return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err) @@ -883,9 +900,7 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob if err != nil { return err } - newBlob := make([]byte, len(manifestBlob)) - copy(newBlob, manifestBlob) - s.manifest = newBlob + s.manifest = slices.Clone(manifestBlob) s.manifestDigest = digest return nil } diff --git a/vendor/github.com/containers/image/v5/storage/storage_image.go b/vendor/github.com/containers/image/v5/storage/storage_image.go index 9f16dd334..ac09f3dbb 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_image.go +++ b/vendor/github.com/containers/image/v5/storage/storage_image.go @@ -43,7 +43,7 @@ func (s *storageImageCloser) Size() (int64, error) { // newImage creates an image that also knows its size func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) { - src, err := newImageSource(ctx, sys, s) + src, err := newImageSource(sys, s) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go index dbb9804a6..49f7d03c8 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_reference.go +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -14,6 +14,7 @@ import ( "github.com/containers/storage" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" + "golang.org/x/exp/slices" ) // A storageReference holds an arbitrary name and/or an ID, which is a 32-byte @@ -52,17 +53,15 @@ func newReference(transport storageTransport, named reference.Named, id string) // imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref func imageMatchesRepo(image *storage.Image, ref reference.Named) bool { repo := ref.Name() - for _, name := range image.Names { - if named, err := reference.ParseNormalizedNamed(name); err == nil { - if named.Name() == repo { - return true - } + return slices.ContainsFunc(image.Names, func(name string) bool { + if named, err := reference.ParseNormalizedNamed(name); err == nil && named.Name() == repo { + return true } - } - return false + return false + }) } -// multiArchImageMatchesSystemContext returns true if if the passed-in image both contains a +// multiArchImageMatchesSystemContext returns true if the passed-in image both contains a // multi-arch manifest that matches the passed-in digest, and the image is the per-platform // image instance that matches sys. // @@ -170,11 +169,9 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag // sake of older consumers that don't know there's a whole list in there now. 
if s.named != nil { if digested, ok := s.named.(reference.Digested); ok { - for _, digest := range loadedImage.Digests { - if digest == digested.Digest() { - loadedImage.Digest = digest - break - } + digest := digested.Digest() + if slices.Contains(loadedImage.Digests, digest) { + loadedImage.Digest = digest } } } @@ -207,10 +204,10 @@ func (s storageReference) StringWithinTransport() string { } res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" if s.named != nil { - res = res + s.named.String() + res += s.named.String() } if s.id != "" { - res = res + "@" + s.id + res += "@" + s.id } return res } @@ -218,10 +215,10 @@ func (s storageReference) StringWithinTransport() string { func (s storageReference) PolicyConfigurationIdentity() string { res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" if s.named != nil { - res = res + s.named.String() + res += s.named.String() } if s.id != "" { - res = res + "@" + s.id + res += "@" + s.id } return res } @@ -280,7 +277,7 @@ func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemCont } func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ctx, sys, s) + return newImageSource(sys, s) } func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go index d1affc5e9..03c2fa28c 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_src.go +++ b/vendor/github.com/containers/image/v5/storage/storage_src.go @@ -44,7 +44,7 @@ type storageImageSource struct { } // newImageSource sets up an image for reading. -func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) { +func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) { // First, locate the image. img, err := imageRef.resolveImage(sys) if err != nil { @@ -186,7 +186,7 @@ func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []st } // GetManifest() reads the image's manifest. -func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) { +func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, mimeType string, err error) { if instanceDigest != nil { key := manifestBigDataKey(*instanceDigest) blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) @@ -293,7 +293,7 @@ func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos [] if nextPhysical >= len(physicalInfos) { return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos)) } - res[i] = physicalInfos[nextPhysical] + res[i] = physicalInfos[nextPhysical] // FIXME? Should we preserve more data in manifestInfos? Notably the current approach correctly removes zstd:chunked metadata annotations. 
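For reference, the two golang.org/x/exp/slices helpers adopted in storage_reference.go above behave like this (assumed values, illustration only):

    package main

    import (
    	"fmt"
    	"strings"

    	"golang.org/x/exp/slices"
    )

    func main() {
    	names := []string{"docker.io/library/alpine:latest", "quay.io/foo/bar:1"}

    	// slices.Contains: exact membership, as in the resolveImage digest check.
    	fmt.Println(slices.Contains(names, "quay.io/foo/bar:1")) // true

    	// slices.ContainsFunc: membership under a predicate, as in imageMatchesRepo.
    	fmt.Println(slices.ContainsFunc(names, func(n string) bool {
    		return strings.HasPrefix(n, "quay.io/")
    	})) // true
    }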
nextPhysical++ } } diff --git a/vendor/github.com/containers/image/v5/storage/storage_transport.go b/vendor/github.com/containers/image/v5/storage/storage_transport.go index 104e9d8cc..58ba3ee65 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_transport.go +++ b/vendor/github.com/containers/image/v5/storage/storage_transport.go @@ -234,35 +234,30 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc reference = reference[closeIndex+1:] // Peel off a "driver@" from the start. driverInfo := "" - driverSplit := strings.SplitN(storeSpec, "@", 2) - if len(driverSplit) != 2 { + driverPart1, driverPart2, gotDriver := strings.Cut(storeSpec, "@") + if !gotDriver { + storeSpec = driverPart1 if storeSpec == "" { return nil, ErrInvalidReference } } else { - driverInfo = driverSplit[0] + driverInfo = driverPart1 if driverInfo == "" { return nil, ErrInvalidReference } - storeSpec = driverSplit[1] + storeSpec = driverPart2 if storeSpec == "" { return nil, ErrInvalidReference } } // Peel off a ":options" from the end. var options []string - optionsSplit := strings.SplitN(storeSpec, ":", 2) - if len(optionsSplit) == 2 { - options = strings.Split(optionsSplit[1], ",") - storeSpec = optionsSplit[0] + storeSpec, optionsPart, gotOptions := strings.Cut(storeSpec, ":") + if gotOptions { + options = strings.Split(optionsPart, ",") } // Peel off a "+runroot" from the new end. - runRootInfo := "" - runRootSplit := strings.SplitN(storeSpec, "+", 2) - if len(runRootSplit) == 2 { - runRootInfo = runRootSplit[1] - storeSpec = runRootSplit[0] - } + storeSpec, runRootInfo, _ := strings.Cut(storeSpec, "+") // runRootInfo is "" if there is no "+" // The rest is our graph root. rootInfo := storeSpec // Check that any paths are absolute paths. 
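The strings.Cut rewrite in ParseReference above makes the store-spec grammar easier to follow. A hedged standalone sketch of the same peeling order (illustrative only; the real function also validates each piece and rejects empty components):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // parseStoreSpec peels "driver@root+runroot:opt1,opt2" apart in the same
    // order as the rewritten ParseReference: driver, then options, then run root.
    func parseStoreSpec(spec string) (driver, root, runRoot string, options []string) {
    	if d, rest, ok := strings.Cut(spec, "@"); ok {
    		driver, spec = d, rest
    	}
    	if s, opts, ok := strings.Cut(spec, ":"); ok {
    		spec = s
    		options = strings.Split(opts, ",")
    	}
    	spec, runRoot, _ = strings.Cut(spec, "+") // runRoot is "" if there is no "+"
    	root = spec
    	return
    }

    func main() {
    	d, root, run, opts := parseStoreSpec("overlay@/var/lib/containers/storage+/run/containers/storage:opt=1")
    	fmt.Println(d, root, run, opts)
    }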
diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go index 690067ec3..d5578d9e8 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go @@ -9,8 +9,8 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/types" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/maps" ) // ConfigUpdater is an interface that ImageReferences for "tarball" images also @@ -35,9 +35,7 @@ func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[ if r.annotations == nil { r.annotations = make(map[string]string) } - for k, v := range annotations { - r.annotations[k] = v - } + maps.Copy(r.annotations, annotations) return nil } @@ -73,7 +71,7 @@ func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContex func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { for _, filename := range r.filenames { if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("error removing %q: %v", filename, err) + return fmt.Errorf("error removing %q: %w", filename, err) } } return nil diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go index 1dc4c3ad9..0fb3a8393 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -18,6 +18,8 @@ import ( digest "github.com/opencontainers/go-digest" imgspecs "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" ) type tarballImageSource struct { @@ -62,13 +64,13 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System } else { file, err = os.Open(filename) if err != nil { - return nil, fmt.Errorf("error opening %q for reading: %v", filename, err) + return nil, fmt.Errorf("error opening %q for reading: %w", filename, err) } defer file.Close() reader = file fileinfo, err := file.Stat() if err != nil { - return nil, fmt.Errorf("error reading size of %q: %v", filename, err) + return nil, fmt.Errorf("error reading size of %q: %w", filename, err) } blobSize = fileinfo.Size() blobTime = fileinfo.ModTime() @@ -168,10 +170,6 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System MediaType: blobTypes[i], }) } - annotations := make(map[string]string) - for k, v := range r.annotations { - annotations[k] = v - } manifest := imgspecv1.Manifest{ Versioned: imgspecs.Versioned{ SchemaVersion: 2, @@ -182,7 +180,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System MediaType: imgspecv1.MediaTypeImageConfig, }, Layers: layerDescriptors, - Annotations: annotations, + Annotations: maps.Clone(r.annotations), } // Encode the manifest. @@ -228,20 +226,19 @@ func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobIn return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil } // Maybe one of the layer blobs. - for i := range is.blobIDs { - if blobinfo.Digest == is.blobIDs[i] { - // We want to read that layer: open the file or memory block and hand it back. 
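An aside on the two golang.org/x/exp/maps helpers adopted above: maps.Copy replaces the hand-written merge loop in ConfigUpdate, and maps.Clone replaces allocate-then-copy in NewImageSource. Clone also returns nil for a nil input, which is why the manifest's Annotations field can take it directly. A small illustration (assumed values):

    package main

    import (
    	"fmt"

    	"golang.org/x/exp/maps"
    )

    func main() {
    	annotations := map[string]string{"org.example.a": "1"}

    	merged := map[string]string{"org.example.b": "2"}
    	maps.Copy(merged, annotations) // equivalent to the removed for-range copy loop

    	var nilMap map[string]string
    	fmt.Println(merged, maps.Clone(annotations), maps.Clone(nilMap) == nil) // nil stays nil
    }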
- if is.filenames[i] == "-" { - return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil - } - reader, err := os.Open(is.filenames[i]) - if err != nil { - return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) - } - return reader, is.blobSizes[i], nil - } + i := slices.Index(is.blobIDs, blobinfo.Digest) + if i == -1 { + return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) + } + // We want to read that layer: open the file or memory block and hand it back. + if is.filenames[i] == "-" { + return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + } + reader, err := os.Open(is.filenames[i]) + if err != nil { + return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) } - return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) + return reader, is.blobSizes[i], nil } // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go index 62d767b58..1d9c2dc35 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go @@ -25,24 +25,24 @@ import ( // ParseImageName converts a URL-like image name to a types.ImageReference. func ParseImageName(imgName string) (types.ImageReference, error) { // Keep this in sync with TransportFromImageName! - parts := strings.SplitN(imgName, ":", 2) - if len(parts) != 2 { + transportName, withinTransport, valid := strings.Cut(imgName, ":") + if !valid { return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName) } - transport := transports.Get(parts[0]) + transport := transports.Get(transportName) if transport == nil { - return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0]) + return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, transportName) } - return transport.ParseReference(parts[1]) + return transport.ParseReference(withinTransport) } // TransportFromImageName converts an URL-like name to a types.ImageTransport or nil when // the transport is unknown or when the input is invalid. func TransportFromImageName(imageName string) types.ImageTransport { // Keep this in sync with ParseImageName! - parts := strings.SplitN(imageName, ":", 2) - if len(parts) == 2 { - return transports.Get(parts[0]) + transportName, _, valid := strings.Cut(imageName, ":") + if valid { + return transports.Get(transportName) } return nil } diff --git a/vendor/github.com/containers/image/v5/transports/transports.go b/vendor/github.com/containers/image/v5/transports/transports.go index 46ee3710f..834f33b48 100644 --- a/vendor/github.com/containers/image/v5/transports/transports.go +++ b/vendor/github.com/containers/image/v5/transports/transports.go @@ -5,6 +5,7 @@ import ( "sort" "sync" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/types" ) @@ -66,22 +67,21 @@ func Register(t types.ImageTransport) { // This is the generally recommended way to refer to images in the UI. // // NOTE: The returned string is not promised to be equal to the original input to ParseImageName; -// e.g. 
default attribute values omitted by the user may be filled in in the return value, or vice versa. +// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. func ImageName(ref types.ImageReference) string { return ref.Transport().Name() + ":" + ref.StringWithinTransport() } +var deprecatedTransports = set.NewWithValues("atomic") + // ListNames returns a list of non deprecated transport names. // Deprecated transports can be used, but are not presented to users. func ListNames() []string { kt.mu.Lock() defer kt.mu.Unlock() - deprecated := map[string]bool{ - "atomic": true, - } var names []string for _, transport := range kt.transports { - if !deprecated[transport.Name()] { + if !deprecatedTransports.Contains(transport.Name()) { names = append(names, transport.Name()) } } diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go index 33a54566c..6ea414b86 100644 --- a/vendor/github.com/containers/image/v5/types/types.go +++ b/vendor/github.com/containers/image/v5/types/types.go @@ -11,7 +11,7 @@ import ( v1 "github.com/opencontainers/image-spec/specs-go/v1" ) -// ImageTransport is a top-level namespace for ways to to store/load an image. +// ImageTransport is a top-level namespace for ways to store/load an image. // It should generally correspond to ImageSource/ImageDestination implementations. // // Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport. @@ -48,7 +48,7 @@ type ImageReference interface { // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; - // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. + // e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; // instead, see transports.ImageName(). StringWithinTransport() string @@ -138,7 +138,7 @@ type BlobInfo struct { // or if it should be compressed or decompressed. The field defaults to // preserve the original layer's compressedness. // TODO: To remove together with CryptoOperation in re-design to remove - // field out out of BlobInfo. + // field out of BlobInfo. CompressionOperation LayerCompression // CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct // MIME type for compressed layers (e.g., gzip or zstd). This field MUST be @@ -149,7 +149,7 @@ type BlobInfo struct { // CryptoOperation is used in Image.UpdateLayerInfos to instruct // whether the original layer was encrypted/decrypted // TODO: To remove together with CompressionOperation in re-design to - // remove field out out of BlobInfo. + // remove field out of BlobInfo. CryptoOperation LayerCrypto // Before adding any fields to this struct, read the NOTE above. 
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index a33f899f2..16a6d5816 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -6,9 +6,9 @@ const (
 	// VersionMajor is for API-incompatible changes
 	VersionMajor = 5
 	// VersionMinor is for functionality in a backwards-compatible manner
-	VersionMinor = 24
+	VersionMinor = 25
 	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch = 2
+	VersionPatch = 0
 
 	// VersionDev indicates development branch. Releases will be empty string.
 	VersionDev = ""
diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go
index 3efda3482..d4d2b58f2 100644
--- a/vendor/github.com/go-openapi/runtime/client_request.go
+++ b/vendor/github.com/go-openapi/runtime/client_request.go
@@ -16,7 +16,6 @@ package runtime
 
 import (
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"time"
@@ -79,7 +78,7 @@ type NamedReadCloser interface {
 func NamedReader(name string, rdr io.Reader) NamedReadCloser {
 	rc, ok := rdr.(io.ReadCloser)
 	if !ok {
-		rc = ioutil.NopCloser(rdr)
+		rc = io.NopCloser(rdr)
 	}
 	return &namedReadCloser{
 		name: name,
diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go
index 8740b1505..a8a3604a2 100644
--- a/vendor/github.com/go-openapi/strfmt/bson.go
+++ b/vendor/github.com/go-openapi/strfmt/bson.go
@@ -142,7 +142,7 @@ func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) {
 // BSON value representation of themselves. The BSON bytes and type can be
 // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
 // wishes to retain the data after returning.
-func (id *ObjectId) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error {
+func (id *ObjectId) UnmarshalBSONValue(_ bsontype.Type, data []byte) error {
 	var oid bsonprim.ObjectID
 	copy(oid[:], data)
 	*id = ObjectId(oid)
diff --git a/vendor/github.com/go-openapi/strfmt/time.go b/vendor/github.com/go-openapi/strfmt/time.go
index 6f5a44bb7..9bef4c3b3 100644
--- a/vendor/github.com/go-openapi/strfmt/time.go
+++ b/vendor/github.com/go-openapi/strfmt/time.go
@@ -133,13 +133,19 @@ func (t DateTime) String() string {
 }
 
 // IsZero returns whether the date time is a zero value
-func (t DateTime) IsZero() bool {
-	return time.Time(t).IsZero()
+func (t *DateTime) IsZero() bool {
+	if t == nil {
+		return true
+	}
+	return time.Time(*t).IsZero()
 }
 
 // IsUnixZero returns whether the date time is equivalent to time.Unix(0, 0).UTC().
-func (t DateTime) IsUnixZero() bool {
-	return time.Time(t) == UnixZero
+func (t *DateTime) IsUnixZero() bool {
+	if t == nil {
+		return true
+	}
+	return time.Time(*t).Equal(UnixZero)
 }
 
 // MarshalText implements the text marshaller interface
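The switch to pointer receivers above matters because calling a value-receiver method through a nil *DateTime could panic on the implicit dereference, and comparing with Equal instead of == tolerates monotonic-clock and location differences. A standalone sketch of the new behavior, using a local stand-in type rather than the real strfmt.DateTime:

    package main

    import (
    	"fmt"
    	"time"
    )

    // DateTime is a stand-in for strfmt.DateTime, which wraps time.Time.
    type DateTime time.Time

    // IsZero mirrors the patched form: a nil receiver reports zero
    // instead of being dereferenced.
    func (t *DateTime) IsZero() bool {
    	if t == nil {
    		return true
    	}
    	return time.Time(*t).IsZero()
    }

    func main() {
    	var dt *DateTime
    	// With the old value receiver this call would panic; with the
    	// pointer receiver a nil DateTime simply counts as zero.
    	fmt.Println(dt.IsZero()) // true
    }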
diff --git a/vendor/github.com/go-openapi/validate/values.go b/vendor/github.com/go-openapi/validate/values.go
index c88d35d7f..e7ad8c103 100644
--- a/vendor/github.com/go-openapi/validate/values.go
+++ b/vendor/github.com/go-openapi/validate/values.go
@@ -248,7 +248,7 @@ func MinimumUint(path, in string, data, min uint64, exclusive bool) *errors.Validation {
 
 // MultipleOf validates if the provided number is a multiple of the factor
 func MultipleOf(path, in string, data, factor float64) *errors.Validation {
 	// multipleOf factor must be positive
-	if factor < 0 {
+	if factor <= 0 {
 		return errors.MultipleOfMustBePositive(path, in, factor)
 	}
 	var mult float64
@@ -266,7 +266,7 @@ func MultipleOf(path, in string, data, factor float64) *errors.Validation {
 
 // MultipleOfInt validates if the provided integer is a multiple of the factor
 func MultipleOfInt(path, in string, data int64, factor int64) *errors.Validation {
 	// multipleOf factor must be positive
-	if factor < 0 {
+	if factor <= 0 {
 		return errors.MultipleOfMustBePositive(path, in, factor)
 	}
 	mult := data / factor
@@ -278,6 +278,10 @@ func MultipleOfInt(path, in string, data int64, factor int64) *errors.Validation {
 
 // MultipleOfUint validates if the provided unsigned integer is a multiple of the factor
 func MultipleOfUint(path, in string, data, factor uint64) *errors.Validation {
+	// multipleOf factor must be positive
+	if factor == 0 {
+		return errors.MultipleOfMustBePositive(path, in, factor)
+	}
 	mult := data / factor
 	if mult*factor != data {
 		return errors.NotMultipleOf(path, in, factor, data)
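The guards added above close a divide-by-zero: with an integer or unsigned factor of zero, data / factor panics at runtime, and for floats a zero factor previously slipped past the factor < 0 check. A standalone sketch of the unsigned case (not the library code itself):

    package main

    import "fmt"

    // multipleOfUint mirrors the patched check; without the factor == 0
    // guard, data/factor would panic with an integer divide by zero.
    func multipleOfUint(data, factor uint64) error {
    	if factor == 0 {
    		return fmt.Errorf("multipleOf factor must be positive, got %d", factor)
    	}
    	if (data/factor)*factor != data {
    		return fmt.Errorf("%d is not a multiple of %d", data, factor)
    	}
    	return nil
    }

    func main() {
    	fmt.Println(multipleOfUint(10, 5)) // <nil>
    	fmt.Println(multipleOfUint(10, 4)) // 10 is not a multiple of 4
    	fmt.Println(multipleOfUint(10, 0)) // multipleOf factor must be positive, got 0
    }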
diff --git a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
index b58a7bc93..3ffbcc91f 100644
--- a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
+++ b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go
@@ -18,16 +18,40 @@ import (
 	"crypto/x509/pkix"
 	"encoding/asn1"
 	"errors"
+	"fmt"
 )
 
 var (
-	OIDIssuer                   = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 1}
-	OIDGitHubWorkflowTrigger    = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 2}
-	OIDGitHubWorkflowSHA        = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 3}
-	OIDGitHubWorkflowName       = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 4}
+	// Deprecated: Use OIDIssuerV2
+	OIDIssuer = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 1}
+	// Deprecated: Use OIDBuildTrigger
+	OIDGitHubWorkflowTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 2}
+	// Deprecated: Use OIDSourceRepositoryDigest
+	OIDGitHubWorkflowSHA = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 3}
+	// Deprecated: Use OIDBuildConfigURI or OIDBuildConfigDigest
+	OIDGitHubWorkflowName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 4}
+	// Deprecated: Use OIDSourceRepositoryURI
 	OIDGitHubWorkflowRepository = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 5}
-	OIDGitHubWorkflowRef        = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 6}
-	OIDOtherName                = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7}
+	// Deprecated: Use OIDSourceRepositoryRef
+	OIDGitHubWorkflowRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 6}
+
+	OIDOtherName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7}
+	OIDIssuerV2  = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 8}
+
+	// CI extensions
+	OIDBuildSignerURI                  = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 9}
+	OIDBuildSignerDigest               = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 10}
+	OIDRunnerEnvironment               = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 11}
+	OIDSourceRepositoryURI             = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 12}
+	OIDSourceRepositoryDigest          = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 13}
+	OIDSourceRepositoryRef             = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 14}
+	OIDSourceRepositoryIdentifier      = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 15}
+	OIDSourceRepositoryOwnerURI        = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 16}
+	OIDSourceRepositoryOwnerIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 17}
+	OIDBuildConfigURI                  = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 18}
+	OIDBuildConfigDigest               = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 19}
+	OIDBuildTrigger                    = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 20}
+	OIDRunInvocationURI                = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 21}
 )
 
 // Extensions contains all custom x509 extensions defined by Fulcio
@@ -39,33 +63,79 @@ type Extensions struct {
 	// a federated login like Dex it should match the issuer URL of the
 	// upstream issuer. If the issuer is not set, the extensions are invalid and
 	// will fail to render.
-	Issuer string // OID 1.3.6.1.4.1.57264.1.1
+	Issuer string // OID 1.3.6.1.4.1.57264.1.8 and 1.3.6.1.4.1.57264.1.1 (Deprecated)
 
+	// Deprecated
 	// Triggering event of the Github Workflow. Matches the `event_name` claim of ID
 	// tokens from Github Actions
 	GithubWorkflowTrigger string // OID 1.3.6.1.4.1.57264.1.2
 
+	// Deprecated
 	// SHA of git commit being built in Github Actions. Matches the `sha` claim of ID
 	// tokens from Github Actions
 	GithubWorkflowSHA string // OID 1.3.6.1.4.1.57264.1.3
 
+	// Deprecated
 	// Name of Github Actions Workflow. Matches the `workflow` claim of the ID
 	// tokens from Github Actions
 	GithubWorkflowName string // OID 1.3.6.1.4.1.57264.1.4
 
+	// Deprecated
 	// Repository of the Github Actions Workflow. Matches the `repository` claim of the ID
 	// tokens from Github Actions
 	GithubWorkflowRepository string // OID 1.3.6.1.4.1.57264.1.5
 
+	// Deprecated
 	// Git Ref of the Github Actions Workflow. Matches the `ref` claim of the ID tokens
 	// from Github Actions
 	GithubWorkflowRef string // 1.3.6.1.4.1.57264.1.6
+
+	// Reference to specific build instructions that are responsible for signing.
+	BuildSignerURI string // 1.3.6.1.4.1.57264.1.9
+
+	// Immutable reference to the specific version of the build instructions that is responsible for signing.
+	BuildSignerDigest string // 1.3.6.1.4.1.57264.1.10
+
+	// Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure.
+	RunnerEnvironment string // 1.3.6.1.4.1.57264.1.11
+
+	// Source repository URL that the build was based on.
+	SourceRepositoryURI string // 1.3.6.1.4.1.57264.1.12
+
+	// Immutable reference to a specific version of the source code that the build was based upon.
+	SourceRepositoryDigest string // 1.3.6.1.4.1.57264.1.13
+
+	// Source Repository Ref that the build run was based upon.
+	SourceRepositoryRef string // 1.3.6.1.4.1.57264.1.14
+
+	// Immutable identifier for the source repository the workflow was based upon.
+	SourceRepositoryIdentifier string // 1.3.6.1.4.1.57264.1.15
+
+	// Source repository owner URL of the owner of the source repository that the build was based on.
+ SourceRepositoryOwnerURI string // 1.3.6.1.4.1.57264.1.16 + + // Immutable identifier for the owner of the source repository that the workflow was based upon. + SourceRepositoryOwnerIdentifier string // 1.3.6.1.4.1.57264.1.17 + + // Build Config URL to the top-level/initiating build instructions. + BuildConfigURI string // 1.3.6.1.4.1.57264.1.18 + + // Immutable reference to the specific version of the top-level/initiating build instructions. + BuildConfigDigest string // 1.3.6.1.4.1.57264.1.19 + + // Event or action that initiated the build. + BuildTrigger string // 1.3.6.1.4.1.57264.1.20 + + // Run Invocation URL to uniquely identify the build execution. + RunInvocationURI string // 1.3.6.1.4.1.57264.1.21 } func (e Extensions) Render() ([]pkix.Extension, error) { var exts []pkix.Extension + // BEGIN: Deprecated if e.Issuer != "" { + // deprecated issuer extension due to incorrect encoding exts = append(exts, pkix.Extension{ Id: OIDIssuer, Value: []byte(e.Issuer), @@ -103,14 +173,163 @@ func (e Extensions) Render() ([]pkix.Extension, error) { Value: []byte(e.GithubWorkflowRef), }) } + // END: Deprecated + + // duplicate issuer with correct RFC 5280 encoding + if e.Issuer != "" { + // construct DER encoding of issuer string + val, err := asn1.MarshalWithParams(e.Issuer, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDIssuerV2, + Value: val, + }) + } else { + return nil, errors.New("extensions must have a non-empty issuer url") + } + + if e.BuildSignerURI != "" { + val, err := asn1.MarshalWithParams(e.BuildSignerURI, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDBuildSignerURI, + Value: val, + }) + } + if e.BuildSignerDigest != "" { + val, err := asn1.MarshalWithParams(e.BuildSignerDigest, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDBuildSignerDigest, + Value: val, + }) + } + if e.RunnerEnvironment != "" { + val, err := asn1.MarshalWithParams(e.RunnerEnvironment, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDRunnerEnvironment, + Value: val, + }) + } + if e.SourceRepositoryURI != "" { + val, err := asn1.MarshalWithParams(e.SourceRepositoryURI, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDSourceRepositoryURI, + Value: val, + }) + } + if e.SourceRepositoryDigest != "" { + val, err := asn1.MarshalWithParams(e.SourceRepositoryDigest, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDSourceRepositoryDigest, + Value: val, + }) + } + if e.SourceRepositoryRef != "" { + val, err := asn1.MarshalWithParams(e.SourceRepositoryRef, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDSourceRepositoryRef, + Value: val, + }) + } + if e.SourceRepositoryIdentifier != "" { + val, err := asn1.MarshalWithParams(e.SourceRepositoryIdentifier, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDSourceRepositoryIdentifier, + Value: val, + }) + } + if e.SourceRepositoryOwnerURI != "" { + val, err := asn1.MarshalWithParams(e.SourceRepositoryOwnerURI, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDSourceRepositoryOwnerURI, + Value: val, + }) + } + if e.SourceRepositoryOwnerIdentifier != "" { + val, err := asn1.MarshalWithParams(e.SourceRepositoryOwnerIdentifier, "utf8") + if err != nil { + return nil, 
err + } + exts = append(exts, pkix.Extension{ + Id: OIDSourceRepositoryOwnerIdentifier, + Value: val, + }) + } + if e.BuildConfigURI != "" { + val, err := asn1.MarshalWithParams(e.BuildConfigURI, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDBuildConfigURI, + Value: val, + }) + } + if e.BuildConfigDigest != "" { + val, err := asn1.MarshalWithParams(e.BuildConfigDigest, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDBuildConfigDigest, + Value: val, + }) + } + if e.BuildTrigger != "" { + val, err := asn1.MarshalWithParams(e.BuildTrigger, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDBuildTrigger, + Value: val, + }) + } + if e.RunInvocationURI != "" { + val, err := asn1.MarshalWithParams(e.RunInvocationURI, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDRunInvocationURI, + Value: val, + }) + } + return exts, nil } -func ParseExtensions(ext []pkix.Extension) (Extensions, error) { +func parseExtensions(ext []pkix.Extension) (Extensions, error) { out := Extensions{} for _, e := range ext { switch { + // BEGIN: Deprecated case e.Id.Equal(OIDIssuer): out.Issuer = string(e.Value) case e.Id.Equal(OIDGitHubWorkflowTrigger): @@ -123,6 +342,63 @@ func ParseExtensions(ext []pkix.Extension) (Extensions, error) { out.GithubWorkflowRepository = string(e.Value) case e.Id.Equal(OIDGitHubWorkflowRef): out.GithubWorkflowRef = string(e.Value) + // END: Deprecated + case e.Id.Equal(OIDIssuerV2): + if err := parseDERString(e.Value, &out.Issuer); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildSignerURI): + if err := parseDERString(e.Value, &out.BuildSignerURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildSignerDigest): + if err := parseDERString(e.Value, &out.BuildSignerDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDRunnerEnvironment): + if err := parseDERString(e.Value, &out.RunnerEnvironment); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryURI): + if err := parseDERString(e.Value, &out.SourceRepositoryURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryDigest): + if err := parseDERString(e.Value, &out.SourceRepositoryDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryRef): + if err := parseDERString(e.Value, &out.SourceRepositoryRef); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryIdentifier): + if err := parseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryOwnerURI): + if err := parseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDSourceRepositoryOwnerIdentifier): + if err := parseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildConfigURI): + if err := parseDERString(e.Value, &out.BuildConfigURI); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildConfigDigest): + if err := parseDERString(e.Value, &out.BuildConfigDigest); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDBuildTrigger): + if err := parseDERString(e.Value, &out.BuildTrigger); err != nil { + return Extensions{}, err + } + case e.Id.Equal(OIDRunInvocationURI): + if err := 
parseDERString(e.Value, &out.RunInvocationURI); err != nil {
+				return Extensions{}, err
+			}
 		}
 	}
 
@@ -130,3 +406,16 @@ func ParseExtensions(ext []pkix.Extension) (Extensions, error) {
 	// more complex parsing of fields in a backwards compatible way if needed.
 	return out, nil
 }
+
+// parseDERString decodes a DER-encoded string and puts the value in parsedVal.
+// Returns an error if the unmarshalling fails or if there are trailing bytes in the encoding.
+func parseDERString(val []byte, parsedVal *string) error {
+	rest, err := asn1.Unmarshal(val, parsedVal)
+	if err != nil {
+		return fmt.Errorf("unexpected error unmarshalling DER-encoded string: %v", err)
+	}
+	if len(rest) != 0 {
+		return errors.New("unexpected trailing bytes in DER-encoded string")
+	}
+	return nil
+}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go
index dffbecd33..139e3ed11 100644
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go
@@ -282,7 +282,7 @@ func (m *IntotoV001SchemaContent) UnmarshalBinary(b []byte) error {
 	return nil
 }
 
-// IntotoV001SchemaContentHash Specifies the hash algorithm and value encompassing the entire signed envelope
+// IntotoV001SchemaContentHash Specifies the hash algorithm and value encompassing the entire signed envelope; this is computed by the rekor server, client-provided values are ignored
 //
 // swagger:model IntotoV001SchemaContentHash
 type IntotoV001SchemaContentHash struct {
@@ -392,7 +392,7 @@ func (m *IntotoV001SchemaContentHash) UnmarshalBinary(b []byte) error {
 	return nil
 }
 
-// IntotoV001SchemaContentPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope
+// IntotoV001SchemaContentPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope; this is computed by the rekor server, client-provided values are ignored
 //
 // swagger:model IntotoV001SchemaContentPayloadHash
 type IntotoV001SchemaContentPayloadHash struct {
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go
index 86c0b47f5..3297e5a91 100644
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go
@@ -132,7 +132,8 @@ func (m *IntotoV002Schema) UnmarshalBinary(b []byte) error {
 type IntotoV002SchemaContent struct {
 
 	// envelope
-	Envelope *IntotoV002SchemaContentEnvelope `json:"envelope,omitempty"`
+	// Required: true
+	Envelope *IntotoV002SchemaContentEnvelope `json:"envelope"`
 
 	// hash
 	Hash *IntotoV002SchemaContentHash `json:"hash,omitempty"`
@@ -164,8 +165,9 @@ func (m *IntotoV002SchemaContent) Validate(formats strfmt.Registry) error {
 }
 
 func (m *IntotoV002SchemaContent) validateEnvelope(formats strfmt.Registry) error {
-	if swag.IsZero(m.Envelope) { // not required
-		return nil
+
+	if err := validate.Required("content"+"."+"envelope", "body", m.Envelope); err != nil {
+		return err
 	}
 
 	if m.Envelope != nil {
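For reference, the new Fulcio extension values above are DER-encoded UTF8Strings rather than raw bytes, which is what parseDERString undoes. A standalone round-trip sketch using encoding/asn1; the issuer URL is only an example value:

    package main

    import (
    	"encoding/asn1"
    	"fmt"
    	"log"
    )

    func main() {
    	// Marshal the value as a DER UTF8String, as Render does for the
    	// 1.3.6.1.4.1.57264.1.8 and later extensions.
    	val, err := asn1.MarshalWithParams("https://token.actions.githubusercontent.com", "utf8")
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Unmarshal it back, rejecting trailing bytes as parseDERString does.
    	var decoded string
    	rest, err := asn1.Unmarshal(val, &decoded)
    	if err != nil || len(rest) != 0 {
    		log.Fatal("invalid DER-encoded string")
    	}
    	fmt.Println(decoded)
    }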
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go
index ada461b00..08bc595c3 100644
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go
+++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go
@@ -41,7 +41,7 @@ type SearchIndex struct {
 	Email strfmt.Email `json:"email,omitempty"`
 
 	// hash
-	// Pattern: ^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$
+	// Pattern: ^(sha512:)?[0-9a-fA-F]{128}$|^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$
 	Hash string `json:"hash,omitempty"`
 
 	// operator
@@ -95,7 +95,7 @@ func (m *SearchIndex) validateHash(formats strfmt.Registry) error {
 		return nil
 	}
 
-	if err := validate.Pattern("hash", "body", m.Hash, `^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$`); err != nil {
+	if err := validate.Pattern("hash", "body", m.Hash, `^(sha512:)?[0-9a-fA-F]{128}$|^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$`); err != nil {
 		return err
 	}
 
diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
deleted file mode 100644
index 19fd90e94..000000000
--- a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package cwriter
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"os"
-	"strconv"
-)
-
-// ErrNotTTY not a TeleTYpewriter error.
-var ErrNotTTY = errors.New("not a terminal")
-
-// https://github.com/dylanaraps/pure-sh-bible#cursor-movement
-const (
-	escOpen  = "\x1b["
-	cuuAndEd = "A\x1b[J"
-)
-
-// Writer is a buffered writer that updates the terminal. The
-// contents of writer will be flushed when Flush is called.
-type Writer struct {
-	out      io.Writer
-	buf      bytes.Buffer
-	lines    int // how many lines to clear before flushing new ones
-	fd       int
-	terminal bool
-	termSize func(int) (int, int, error)
-}
-
-// New returns a new Writer with defaults.
-func New(out io.Writer) *Writer {
-	w := &Writer{
-		out: out,
-		termSize: func(_ int) (int, int, error) {
-			return -1, -1, ErrNotTTY
-		},
-	}
-	if f, ok := out.(*os.File); ok {
-		w.fd = int(f.Fd())
-		if IsTerminal(w.fd) {
-			w.terminal = true
-			w.termSize = func(fd int) (int, int, error) {
-				return GetSize(fd)
-			}
-		}
-	}
-	return w
-}
-
-// Flush flushes the underlying buffer.
-func (w *Writer) Flush(lines int) (err error) {
-	// some terminals interpret 'cursor up 0' as 'cursor up 1'
-	if w.lines > 0 {
-		err = w.clearLines()
-		if err != nil {
-			return
-		}
-	}
-	w.lines = lines
-	_, err = w.buf.WriteTo(w.out)
-	return
-}
-
-// Write appends the contents of p to the underlying buffer.
-func (w *Writer) Write(p []byte) (n int, err error) {
-	return w.buf.Write(p)
-}
-
-// WriteString writes string to the underlying buffer.
-func (w *Writer) WriteString(s string) (n int, err error) {
-	return w.buf.WriteString(s)
-}
-
-// ReadFrom reads from the provided io.Reader and writes to the
-// underlying buffer.
-func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
-	return w.buf.ReadFrom(r)
-}
-
-// GetTermSize returns WxH of underlying terminal.
-func (w *Writer) GetTermSize() (width, height int, err error) { - return w.termSize(w.fd) -} - -func (w *Writer) ansiCuuAndEd() error { - buf := make([]byte, 8) - buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lines), 10) - _, err := w.out.Write(append(buf, cuuAndEd...)) - return err -} diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_posix.go b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_posix.go deleted file mode 100644 index f54a5d06b..000000000 --- a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_posix.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !windows - -package cwriter - -import ( - "golang.org/x/sys/unix" -) - -func (w *Writer) clearLines() error { - return w.ansiCuuAndEd() -} - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (width, height int, err error) { - ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) - if err != nil { - return -1, -1, err - } - return int(ws.Col), int(ws.Row), nil -} - -// IsTerminal returns whether the given file descriptor is a terminal. -func IsTerminal(fd int) bool { - _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - return err == nil -} diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go b/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go deleted file mode 100644 index c6a34384e..000000000 --- a/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go +++ /dev/null @@ -1,10 +0,0 @@ -package decor - -import "io" - -func mustWriteString(w io.Writer, s string) { - _, err := io.WriteString(w, s) - if err != nil { - panic(err) - } -} diff --git a/vendor/github.com/vbauerster/mpb/v7/progress.go b/vendor/github.com/vbauerster/mpb/v7/progress.go deleted file mode 100644 index 1dda06e5f..000000000 --- a/vendor/github.com/vbauerster/mpb/v7/progress.go +++ /dev/null @@ -1,424 +0,0 @@ -package mpb - -import ( - "bytes" - "container/heap" - "context" - "fmt" - "io" - "math" - "os" - "sync" - "time" - - "github.com/vbauerster/mpb/v7/cwriter" -) - -const ( - prr = 150 * time.Millisecond // default RefreshRate -) - -// Progress represents a container that renders one or more progress bars. -type Progress struct { - ctx context.Context - uwg *sync.WaitGroup - cwg *sync.WaitGroup - bwg *sync.WaitGroup - operateState chan func(*pState) - done chan struct{} - refreshCh chan time.Time - once sync.Once -} - -// pState holds bars in its priorityQueue, it gets passed to (*Progress).serve monitor goroutine. -type pState struct { - bHeap priorityQueue - heapUpdated bool - pMatrix map[int][]chan int - aMatrix map[int][]chan int - - // following are provided/overrided by user - idCount int - reqWidth int - popPriority int - popCompleted bool - outputDiscarded bool - rr time.Duration - uwg *sync.WaitGroup - externalRefresh <-chan interface{} - renderDelay <-chan struct{} - shutdownNotifier chan struct{} - queueBars map[*Bar]*Bar - output io.Writer - debugOut io.Writer -} - -// New creates new Progress container instance. It's not possible to -// reuse instance after (*Progress).Wait method has been called. -func New(options ...ContainerOption) *Progress { - return NewWithContext(context.Background(), options...) -} - -// NewWithContext creates new Progress container instance with provided -// context. It's not possible to reuse instance after (*Progress).Wait -// method has been called. 
-func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { - s := &pState{ - bHeap: priorityQueue{}, - rr: prr, - queueBars: make(map[*Bar]*Bar), - output: os.Stdout, - popPriority: math.MinInt32, - } - - for _, opt := range options { - if opt != nil { - opt(s) - } - } - - p := &Progress{ - ctx: ctx, - uwg: s.uwg, - cwg: new(sync.WaitGroup), - bwg: new(sync.WaitGroup), - operateState: make(chan func(*pState)), - done: make(chan struct{}), - } - - p.cwg.Add(1) - go p.serve(s, cwriter.New(s.output)) - return p -} - -// AddBar creates a bar with default bar filler. -func (p *Progress) AddBar(total int64, options ...BarOption) *Bar { - return p.New(total, BarStyle(), options...) -} - -// AddSpinner creates a bar with default spinner filler. -func (p *Progress) AddSpinner(total int64, options ...BarOption) *Bar { - return p.New(total, SpinnerStyle(), options...) -} - -// New creates a bar with provided BarFillerBuilder. -func (p *Progress) New(total int64, builder BarFillerBuilder, options ...BarOption) *Bar { - return p.Add(total, builder.Build(), options...) -} - -// Add creates a bar which renders itself by provided filler. -// If `total <= 0` triggering complete event by increment methods is disabled. -// Panics if *Progress instance is done, i.e. called after (*Progress).Wait(). -func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) *Bar { - if filler == nil { - filler = NopStyle().Build() - } - p.bwg.Add(1) - result := make(chan *Bar) - select { - case p.operateState <- func(ps *pState) { - bs := ps.makeBarState(total, filler, options...) - bar := newBar(p, bs) - if bs.wait.bar != nil { - ps.queueBars[bs.wait.bar] = bar - } else { - heap.Push(&ps.bHeap, bar) - ps.heapUpdated = true - } - ps.idCount++ - result <- bar - }: - bar := <-result - return bar - case <-p.done: - p.bwg.Done() - panic(fmt.Sprintf("%T instance can't be reused after it's done!", p)) - } -} - -func (p *Progress) traverseBars(cb func(b *Bar) bool) { - sync := make(chan struct{}) - select { - case p.operateState <- func(s *pState) { - for i := 0; i < s.bHeap.Len(); i++ { - bar := s.bHeap[i] - if !cb(bar) { - break - } - } - close(sync) - }: - <-sync - case <-p.done: - } -} - -// UpdateBarPriority same as *Bar.SetPriority(int). -func (p *Progress) UpdateBarPriority(b *Bar, priority int) { - select { - case p.operateState <- func(s *pState) { - if b.index < 0 { - return - } - b.priority = priority - heap.Fix(&s.bHeap, b.index) - }: - case <-p.done: - } -} - -// BarCount returns bars count. -func (p *Progress) BarCount() int { - result := make(chan int) - select { - case p.operateState <- func(s *pState) { result <- s.bHeap.Len() }: - return <-result - case <-p.done: - return 0 - } -} - -// Wait waits for all bars to complete and finally shutdowns container. -// After this method has been called, there is no way to reuse *Progress -// instance. 
-func (p *Progress) Wait() { - // wait for user wg, if any - if p.uwg != nil { - p.uwg.Wait() - } - - // wait for bars to quit, if any - p.bwg.Wait() - - p.once.Do(p.shutdown) - - // wait for container to quit - p.cwg.Wait() -} - -func (p *Progress) shutdown() { - close(p.done) -} - -func (p *Progress) serve(s *pState, cw *cwriter.Writer) { - defer p.cwg.Done() - - p.refreshCh = s.newTicker(p.done) - - render := func(debugOut io.Writer) { - err := s.render(cw) - for err != nil { - if debugOut != nil { - _, err = fmt.Fprintln(debugOut, err) - } else { - panic(err) - } - debugOut = nil - } - } - - for { - select { - case op := <-p.operateState: - op(s) - case <-p.refreshCh: - render(s.debugOut) - case <-s.shutdownNotifier: - for s.heapUpdated { - render(s.debugOut) - } - return - } - } -} - -func (s *pState) render(cw *cwriter.Writer) error { - if s.heapUpdated { - s.updateSyncMatrix() - s.heapUpdated = false - } - syncWidth(s.pMatrix) - syncWidth(s.aMatrix) - - width, height, err := cw.GetTermSize() - if err != nil { - width = s.reqWidth - height = s.bHeap.Len() - } - for i := 0; i < s.bHeap.Len(); i++ { - bar := s.bHeap[i] - go bar.render(width) - } - - return s.flush(cw, height) -} - -func (s *pState) flush(cw *cwriter.Writer, height int) error { - var wg sync.WaitGroup - var popCount int - rows := make([]io.Reader, 0, height) - pool := make([]*Bar, 0, s.bHeap.Len()) - for s.bHeap.Len() > 0 { - var usedRows int - b := heap.Pop(&s.bHeap).(*Bar) - frame := <-b.frameCh - for i := len(frame.rows) - 1; i >= 0; i-- { - if row := frame.rows[i]; len(rows) < height { - rows = append(rows, row) - usedRows++ - } else { - wg.Add(1) - go func() { - _, _ = io.Copy(io.Discard, row) - wg.Done() - }() - } - } - if frame.shutdown != 0 { - b.Wait() // waiting for b.done, so it's safe to read b.bs - drop := b.bs.dropOnComplete - if qb, ok := s.queueBars[b]; ok { - delete(s.queueBars, b) - qb.priority = b.priority - pool = append(pool, qb) - drop = true - } else if s.popCompleted && !b.bs.noPop { - if frame.shutdown > 1 { - popCount += usedRows - drop = true - } else { - s.popPriority++ - b.priority = s.popPriority - } - } - if drop { - s.heapUpdated = true - continue - } - } - pool = append(pool, b) - } - - for _, b := range pool { - heap.Push(&s.bHeap, b) - } - - for i := len(rows) - 1; i >= 0; i-- { - _, err := cw.ReadFrom(rows[i]) - if err != nil { - wg.Wait() - return err - } - } - - wg.Wait() - return cw.Flush(len(rows) - popCount) -} - -func (s *pState) newTicker(done <-chan struct{}) chan time.Time { - ch := make(chan time.Time) - if s.shutdownNotifier == nil { - s.shutdownNotifier = make(chan struct{}) - } - go func() { - if s.renderDelay != nil { - <-s.renderDelay - } - var internalRefresh <-chan time.Time - if !s.outputDiscarded { - if s.externalRefresh == nil { - ticker := time.NewTicker(s.rr) - defer ticker.Stop() - internalRefresh = ticker.C - } - } else { - s.externalRefresh = nil - } - for { - select { - case t := <-internalRefresh: - ch <- t - case x := <-s.externalRefresh: - if t, ok := x.(time.Time); ok { - ch <- t - } else { - ch <- time.Now() - } - case <-done: - close(s.shutdownNotifier) - return - } - } - }() - return ch -} - -func (s *pState) updateSyncMatrix() { - s.pMatrix = make(map[int][]chan int) - s.aMatrix = make(map[int][]chan int) - for i := 0; i < s.bHeap.Len(); i++ { - bar := s.bHeap[i] - table := bar.wSyncTable() - pRow, aRow := table[0], table[1] - - for i, ch := range pRow { - s.pMatrix[i] = append(s.pMatrix[i], ch) - } - - for i, ch := range aRow { - s.aMatrix[i] = 
append(s.aMatrix[i], ch) - } - } -} - -func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOption) *bState { - bs := &bState{ - id: s.idCount, - priority: s.idCount, - reqWidth: s.reqWidth, - total: total, - filler: filler, - debugOut: s.debugOut, - } - - if total > 0 { - bs.triggerComplete = true - } - - for _, opt := range options { - if opt != nil { - opt(bs) - } - } - - if bs.middleware != nil { - bs.filler = bs.middleware(filler) - bs.middleware = nil - } - - for i := 0; i < len(bs.buffers); i++ { - bs.buffers[i] = bytes.NewBuffer(make([]byte, 0, 512)) - } - - bs.subscribeDecorators() - - return bs -} - -func syncWidth(matrix map[int][]chan int) { - for _, column := range matrix { - go maxWidthDistributor(column) - } -} - -func maxWidthDistributor(column []chan int) { - var maxWidth int - for _, ch := range column { - if w := <-ch; w > maxWidth { - maxWidth = w - } - } - for _, ch := range column { - ch <- maxWidth - } -} diff --git a/vendor/github.com/vbauerster/mpb/v7/proxyreader.go b/vendor/github.com/vbauerster/mpb/v7/proxyreader.go deleted file mode 100644 index 51b63ea7a..000000000 --- a/vendor/github.com/vbauerster/mpb/v7/proxyreader.go +++ /dev/null @@ -1,79 +0,0 @@ -package mpb - -import ( - "io" - "time" -) - -type proxyReader struct { - io.ReadCloser - bar *Bar -} - -func (x proxyReader) Read(p []byte) (int, error) { - n, err := x.ReadCloser.Read(p) - x.bar.IncrBy(n) - return n, err -} - -type proxyWriterTo struct { - proxyReader - wt io.WriterTo -} - -func (x proxyWriterTo) WriteTo(w io.Writer) (int64, error) { - n, err := x.wt.WriteTo(w) - x.bar.IncrInt64(n) - return n, err -} - -type ewmaProxyReader struct { - proxyReader -} - -func (x ewmaProxyReader) Read(p []byte) (int, error) { - start := time.Now() - n, err := x.proxyReader.Read(p) - if n > 0 { - x.bar.DecoratorEwmaUpdate(time.Since(start)) - } - return n, err -} - -type ewmaProxyWriterTo struct { - ewmaProxyReader - wt proxyWriterTo -} - -func (x ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) { - start := time.Now() - n, err := x.wt.WriteTo(w) - if n > 0 { - x.bar.DecoratorEwmaUpdate(time.Since(start)) - } - return n, err -} - -func (b *Bar) newProxyReader(r io.Reader) (rc io.ReadCloser) { - pr := proxyReader{toReadCloser(r), b} - if wt, ok := r.(io.WriterTo); ok { - pw := proxyWriterTo{pr, wt} - if b.hasEwma { - rc = ewmaProxyWriterTo{ewmaProxyReader{pr}, pw} - } else { - rc = pw - } - } else if b.hasEwma { - rc = ewmaProxyReader{pr} - } else { - rc = pr - } - return rc -} - -func toReadCloser(r io.Reader) io.ReadCloser { - if rc, ok := r.(io.ReadCloser); ok { - return rc - } - return io.NopCloser(r) -} diff --git a/vendor/github.com/vbauerster/mpb/v7/.gitignore b/vendor/github.com/vbauerster/mpb/v8/.gitignore similarity index 100% rename from vendor/github.com/vbauerster/mpb/v7/.gitignore rename to vendor/github.com/vbauerster/mpb/v8/.gitignore diff --git a/vendor/github.com/vbauerster/mpb/v7/README.md b/vendor/github.com/vbauerster/mpb/v8/README.md similarity index 89% rename from vendor/github.com/vbauerster/mpb/v7/README.md rename to vendor/github.com/vbauerster/mpb/v8/README.md index 413f9e1db..d5f787276 100644 --- a/vendor/github.com/vbauerster/mpb/v7/README.md +++ b/vendor/github.com/vbauerster/mpb/v8/README.md @@ -1,8 +1,8 @@ # Multi Progress Bar -[![GoDoc](https://pkg.go.dev/badge/github.com/vbauerster/mpb)](https://pkg.go.dev/github.com/vbauerster/mpb/v7) +[![GoDoc](https://pkg.go.dev/badge/github.com/vbauerster/mpb)](https://pkg.go.dev/github.com/vbauerster/mpb/v8) [![Test 
status](https://github.com/vbauerster/mpb/actions/workflows/test.yml/badge.svg)](https://github.com/vbauerster/mpb/actions/workflows/test.yml) -[![Donate with PayPal](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.me/vbauerster) +[![Lint status](https://github.com/vbauerster/mpb/actions/workflows/golangci-lint.yml/badge.svg)](https://github.com/vbauerster/mpb/actions/workflows/golangci-lint.yml) **mpb** is a Go lib for rendering progress bars in terminal applications. @@ -26,8 +26,8 @@ import ( "math/rand" "time" - "github.com/vbauerster/mpb/v7" - "github.com/vbauerster/mpb/v7/decor" + "github.com/vbauerster/mpb/v8" + "github.com/vbauerster/mpb/v8/decor" ) func main() { @@ -97,9 +97,8 @@ func main() { // EWMA's unit of measure is an iteration's duration start := time.Now() time.Sleep(time.Duration(rng.Intn(10)+1) * max / 10) - bar.Increment() - // we need to call DecoratorEwmaUpdate to fulfill ewma decorator's contract - bar.DecoratorEwmaUpdate(time.Since(start)) + // we need to call EwmaIncrement to fulfill ewma decorator's contract + bar.EwmaIncrement(time.Since(start)) } }() } diff --git a/vendor/github.com/vbauerster/mpb/v7/UNLICENSE b/vendor/github.com/vbauerster/mpb/v8/UNLICENSE similarity index 100% rename from vendor/github.com/vbauerster/mpb/v7/UNLICENSE rename to vendor/github.com/vbauerster/mpb/v8/UNLICENSE diff --git a/vendor/github.com/vbauerster/mpb/v7/bar.go b/vendor/github.com/vbauerster/mpb/v8/bar.go similarity index 53% rename from vendor/github.com/vbauerster/mpb/v7/bar.go rename to vendor/github.com/vbauerster/mpb/v8/bar.go index 7db860e30..de780fcac 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar.go @@ -3,49 +3,47 @@ package mpb import ( "bytes" "context" - "fmt" "io" - "runtime/debug" "strings" "sync" "time" "github.com/acarl005/stripansi" "github.com/mattn/go-runewidth" - "github.com/vbauerster/mpb/v7/decor" + "github.com/vbauerster/mpb/v8/decor" ) // Bar represents a progress bar. type Bar struct { - index int // used by heap - priority int // used by heap - hasEwma bool - frameCh chan *renderFrame - operateState chan func(*bState) - done chan struct{} - container *Progress - bs *bState - cancel func() - recoveredPanic interface{} + index int // used by heap + priority int // used by heap + frameCh chan *renderFrame + operateState chan func(*bState) + done chan struct{} + container *Progress + bs *bState + cancel func() } -type extenderFunc func(rows []io.Reader, width int, stat decor.Statistics) []io.Reader +type syncTable [2][]chan int +type extenderFunc func([]io.Reader, decor.Statistics) ([]io.Reader, error) // bState is actual bar's state. 
type bState struct { id int priority int reqWidth int + shutdown int total int64 current int64 refill int64 - lastIncrement int64 trimSpace bool completed bool aborted bool triggerComplete bool - dropOnComplete bool + rmOnComplete bool noPop bool + autoRefresh bool aDecorators []decor.Decorator pDecorators []decor.Decorator averageDecorators []decor.AverageDecorator @@ -53,27 +51,25 @@ type bState struct { shutdownListeners []decor.ShutdownListener buffers [3]*bytes.Buffer filler BarFiller - middleware func(BarFiller) BarFiller extender extenderFunc - debugOut io.Writer - - wait struct { - bar *Bar // key for (*pState).queueBars - sync bool - } + renderReq chan<- time.Time + waitBar *Bar // key for (*pState).queueBars } type renderFrame struct { - rows []io.Reader - shutdown int + rows []io.Reader + shutdown int + rmOnComplete bool + noPop bool + done bool + err error } -func newBar(container *Progress, bs *bState) *Bar { - ctx, cancel := context.WithCancel(container.ctx) +func newBar(ctx context.Context, container *Progress, bs *bState) *Bar { + ctx, cancel := context.WithCancel(ctx) bar := &Bar{ priority: bs.priority, - hasEwma: len(bs.ewmaDecorators) != 0, frameCh: make(chan *renderFrame, 1), operateState: make(chan func(*bState)), done: make(chan struct{}), @@ -81,24 +77,42 @@ func newBar(container *Progress, bs *bState) *Bar { cancel: cancel, } + container.bwg.Add(1) go bar.serve(ctx, bs) return bar } -// ProxyReader wraps r with metrics required for progress tracking. -// If r is 'unknown total/size' reader it's mandatory to call -// (*Bar).SetTotal(-1, true) method after (Reader).Read returns io.EOF. -// Panics if r is nil. If bar is already completed or aborted, returns -// nil. +// ProxyReader wraps io.Reader with metrics required for progress tracking. +// If `r` is 'unknown total/size' reader it's mandatory to call +// (*Bar).SetTotal(-1, true) method after (io.Reader).Read returns io.EOF. +// If bar is already completed or aborted, returns nil. +// Panics if `r` is nil. func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser { if r == nil { panic("expected non nil io.Reader") } + result := make(chan bool) select { + case b.operateState <- func(s *bState) { result <- len(s.ewmaDecorators) != 0 }: + return newProxyReader(r, b, <-result) + case <-b.done: + return nil + } +} + +// ProxyWriter wraps io.Writer with metrics required for progress tracking. +// If bar is already completed or aborted, returns nil. +// Panics if `w` is nil. +func (b *Bar) ProxyWriter(w io.Writer) io.WriteCloser { + if w == nil { + panic("expected non nil io.Writer") + } + result := make(chan bool) + select { + case b.operateState <- func(s *bState) { result <- len(s.ewmaDecorators) != 0 }: + return newProxyWriter(w, b, <-result) case <-b.done: return nil - default: - return b.newProxyReader(r) } } @@ -130,35 +144,35 @@ func (b *Bar) Current() int64 { // operation for example. func (b *Bar) SetRefill(amount int64) { select { - case b.operateState <- func(s *bState) { - s.refill = amount - }: + case b.operateState <- func(s *bState) { s.refill = amount }: case <-b.done: } } // TraverseDecorators traverses all available decorators and calls cb func on each. 
func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { - sync := make(chan struct{}) + iter := make(chan decor.Decorator) select { case b.operateState <- func(s *bState) { - for _, decorators := range [...][]decor.Decorator{ + for _, decorators := range [][]decor.Decorator{ s.pDecorators, s.aDecorators, } { for _, d := range decorators { - cb(extractBaseDecorator(d)) + iter <- d } } - close(sync) + close(iter) }: - <-sync + for d := range iter { + cb(unwrap(d)) + } case <-b.done: } } // EnableTriggerComplete enables triggering complete event. It's -// effective only for bar which was constructed with `total <= 0` and +// effective only for bars which were constructed with `total <= 0` and // after total has been set with (*Bar).SetTotal(int64, false). If bar // has been incremented to the total, complete event is triggered right // away. @@ -171,7 +185,7 @@ func (b *Bar) EnableTriggerComplete() { if s.current >= s.total { s.current = s.total s.completed = true - go b.forceRefresh() + b.triggerCompletion(s) } else { s.triggerComplete = true } @@ -182,10 +196,10 @@ func (b *Bar) EnableTriggerComplete() { // SetTotal sets total to an arbitrary value. It's effective only for // bar which was constructed with `total <= 0`. Setting total to negative -// value is equivalent to (*Bar).SetTotal((*Bar).Current(), bool). -// If triggerCompleteNow is true, total value is set to current and +// value is equivalent to (*Bar).SetTotal((*Bar).Current(), bool) but faster. +// If triggerCompletion is true, total value is set to current and // complete event is triggered right away. -func (b *Bar) SetTotal(total int64, triggerCompleteNow bool) { +func (b *Bar) SetTotal(total int64, triggerCompletion bool) { select { case b.operateState <- func(s *bState) { if s.triggerComplete { @@ -196,10 +210,10 @@ func (b *Bar) SetTotal(total int64, triggerCompleteNow bool) { } else { s.total = total } - if triggerCompleteNow { + if triggerCompletion { s.current = s.total s.completed = true - go b.forceRefresh() + b.triggerCompletion(s) } }: case <-b.done: @@ -207,16 +221,39 @@ func (b *Bar) SetTotal(total int64, triggerCompleteNow bool) { } // SetCurrent sets progress' current to an arbitrary value. -// Setting a negative value will cause a panic. func (b *Bar) SetCurrent(current int64) { + if current < 0 { + return + } + select { + case b.operateState <- func(s *bState) { + s.current = current + if s.triggerComplete && s.current >= s.total { + s.current = s.total + s.completed = true + b.triggerCompletion(s) + } + }: + case <-b.done: + } +} + +// EwmaSetCurrent sets progress' current to an arbitrary value and updates +// EWMA based decorators by dur of a single iteration. +func (b *Bar) EwmaSetCurrent(current int64, iterDur time.Duration) { + if current < 0 { + return + } select { case b.operateState <- func(s *bState) { - s.lastIncrement = current - s.current + if n := current - s.current; n > 0 { + s.decoratorEwmaUpdate(n, iterDur) + } s.current = current if s.triggerComplete && s.current >= s.total { s.current = s.total s.completed = true - go b.forceRefresh() + b.triggerCompletion(s) } }: case <-b.done: @@ -240,37 +277,44 @@ func (b *Bar) IncrInt64(n int64) { } select { case b.operateState <- func(s *bState) { - s.lastIncrement = n s.current += n if s.triggerComplete && s.current >= s.total { s.current = s.total s.completed = true - go b.forceRefresh() + b.triggerCompletion(s) } }: case <-b.done: } } -// DecoratorEwmaUpdate updates all EWMA based decorators. 
Should be -// called on each iteration, because EWMA's unit of measure is an -// iteration's duration. Panics if called before *Bar.Incr... family -// methods. -func (b *Bar) DecoratorEwmaUpdate(dur time.Duration) { +// EwmaIncrement is a shorthand for b.EwmaIncrInt64(1, iterDur). +func (b *Bar) EwmaIncrement(iterDur time.Duration) { + b.EwmaIncrInt64(1, iterDur) +} + +// EwmaIncrBy is a shorthand for b.EwmaIncrInt64(int64(n), iterDur). +func (b *Bar) EwmaIncrBy(n int, iterDur time.Duration) { + b.EwmaIncrInt64(int64(n), iterDur) +} + +// EwmaIncrInt64 increments progress by amount of n and updates EWMA based +// decorators by dur of a single iteration. +func (b *Bar) EwmaIncrInt64(n int64, iterDur time.Duration) { + if n <= 0 { + return + } select { case b.operateState <- func(s *bState) { - if s.lastIncrement > 0 { - s.decoratorEwmaUpdate(dur) - s.lastIncrement = 0 - } else { - panic("increment required before ewma iteration update") + s.decoratorEwmaUpdate(n, iterDur) + s.current += n + if s.triggerComplete && s.current >= s.total { + s.current = s.total + s.completed = true + b.triggerCompletion(s) } }: case <-b.done: - if b.bs.lastIncrement > 0 { - b.bs.decoratorEwmaUpdate(dur) - b.bs.lastIncrement = 0 - } } } @@ -279,9 +323,7 @@ func (b *Bar) DecoratorEwmaUpdate(dur time.Duration) { // or after progress resume. func (b *Bar) DecoratorAverageAdjust(start time.Time) { select { - case b.operateState <- func(s *bState) { - s.decoratorAverageAdjust(start) - }: + case b.operateState <- func(s *bState) { s.decoratorAverageAdjust(start) }: case <-b.done: } } @@ -304,8 +346,8 @@ func (b *Bar) Abort(drop bool) { return } s.aborted = true - s.dropOnComplete = drop - go b.forceRefresh() + s.rmOnComplete = drop + b.triggerCompletion(s) }: case <-b.done: } @@ -333,6 +375,18 @@ func (b *Bar) Completed() bool { } } +// IsRunning reports whether the bar is running, i.e. not yet completed +// and not yet aborted. +func (b *Bar) IsRunning() bool { + result := make(chan bool) + select { + case b.operateState <- func(s *bState) { result <- !s.completed && !s.aborted }: + return <-result + case <-b.done: + return false + } +} + // Wait blocks until bar is completed or aborted. 
func (b *Bar) Wait() { <-b.done @@ -340,9 +394,6 @@ func (b *Bar) Wait() { func (b *Bar) serve(ctx context.Context, bs *bState) { defer b.container.bwg.Done() - if bs.wait.bar != nil && bs.wait.sync { - bs.wait.bar.Wait() - } for { select { case op := <-b.operateState: @@ -358,74 +409,65 @@ func (b *Bar) serve(ctx context.Context, bs *bState) { } func (b *Bar) render(tw int) { - select { - case b.operateState <- func(s *bState) { + var done bool + fn := func(s *bState) { var rows []io.Reader stat := newStatistics(tw, s) - defer func() { - // recovering if user defined decorator panics for example - if p := recover(); p != nil { - if s.debugOut != nil { - for _, fn := range []func() (int, error){ - func() (int, error) { - return fmt.Fprintln(s.debugOut, p) - }, - func() (int, error) { - return s.debugOut.Write(debug.Stack()) - }, - } { - if _, err := fn(); err != nil { - panic(err) - } - } - } - s.aborted = !s.completed - s.extender = makePanicExtender(p) - b.recoveredPanic = p - } - if fn := s.extender; fn != nil { - rows = fn(rows, s.reqWidth, stat) - } - frame := &renderFrame{ - rows: rows, - } - if s.completed || s.aborted { - b.cancel() - frame.shutdown++ - } - b.frameCh <- frame - }() - if b.recoveredPanic == nil { - rows = append(rows, s.draw(stat)) - } - }: - case <-b.done: - var rows []io.Reader - s, stat := b.bs, newStatistics(tw, b.bs) - if b.recoveredPanic == nil { - rows = append(rows, s.draw(stat)) + r, err := s.draw(stat) + if err != nil { + b.frameCh <- &renderFrame{err: err} + return } - if fn := s.extender; fn != nil { - rows = fn(rows, s.reqWidth, stat) + rows = append(rows, r) + if s.extender != nil { + rows, err = s.extender(rows, stat) + if err != nil { + b.frameCh <- &renderFrame{err: err} + return + } } frame := &renderFrame{ - rows: rows, + rows: rows, + shutdown: s.shutdown, + rmOnComplete: s.rmOnComplete, + noPop: s.noPop, + done: done, + } + if s.completed || s.aborted { + // post increment makes sure OnComplete decorators are rendered + s.shutdown++ } b.frameCh <- frame } + select { + case b.operateState <- fn: + case <-b.done: + done = true + fn(b.bs) + } +} + +func (b *Bar) triggerCompletion(s *bState) { + if s.autoRefresh { + // Technically this call isn't required, but if refresh rate is set to + // one hour for example and bar completes within a few minutes p.Wait() + // will wait for one hour. This call helps to avoid unnecessary waiting. 
+ go b.tryEarlyRefresh(s.renderReq) + } else { + b.cancel() + } } -func (b *Bar) forceRefresh() { +func (b *Bar) tryEarlyRefresh(renderReq chan<- time.Time) { var anyOtherRunning bool b.container.traverseBars(func(bar *Bar) bool { - anyOtherRunning = b != bar && bar.isRunning() - return !anyOtherRunning + anyOtherRunning = b != bar && bar.IsRunning() + return anyOtherRunning }) if !anyOtherRunning { for { select { - case b.container.refreshCh <- time.Now(): - time.Sleep(prr) + case renderReq <- time.Now(): case <-b.done: return } @@ -433,20 +475,8 @@ func (b *Bar) forceRefresh() { } } -func (b *Bar) isRunning() bool { - result := make(chan bool) - select { - case b.operateState <- func(s *bState) { - result <- !s.completed && !s.aborted - }: - return <-result - case <-b.done: - return false - } -} - -func (b *Bar) wSyncTable() [][]chan int { - result := make(chan [][]chan int) +func (b *Bar) wSyncTable() syncTable { + result := make(chan syncTable) select { case b.operateState <- func(s *bState) { result <- s.wSyncTable() }: return <-result @@ -455,96 +485,119 @@ func (b *Bar) wSyncTable() [][]chan int { } } -func (s *bState) draw(stat decor.Statistics) io.Reader { - bufP, bufB, bufA := s.buffers[0], s.buffers[1], s.buffers[2] - nlr := bytes.NewReader([]byte("\n")) - tw := stat.AvailableWidth - for _, d := range s.pDecorators { - str := d.Decor(stat) - stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str)) - bufP.WriteString(str) - } - if stat.AvailableWidth < 1 { - trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufP.String()), tw, "…")) - bufP.Reset() - return io.MultiReader(trunc, nlr) +func (s *bState) draw(stat decor.Statistics) (io.Reader, error) { + r, err := s.drawImpl(stat) + if err != nil { + for _, b := range s.buffers { + b.Reset() + } + return nil, err } + return io.MultiReader(r, strings.NewReader("\n")), nil +} - if !s.trimSpace && stat.AvailableWidth > 1 { - stat.AvailableWidth -= 2 - bufB.WriteByte(' ') - defer bufB.WriteByte(' ') +func (s *bState) drawImpl(stat decor.Statistics) (io.Reader, error) { + decorFiller := func(buf *bytes.Buffer, decorators []decor.Decorator) (res struct { + width int + truncate bool + err error + }) { + res.width = stat.AvailableWidth + for _, d := range decorators { + str := d.Decor(stat) + if stat.AvailableWidth > 0 { + stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str)) + if res.err == nil { + _, res.err = buf.WriteString(str) + } + } + } + res.truncate = stat.AvailableWidth < 0 + return res } - tw = stat.AvailableWidth - for _, d := range s.aDecorators { - str := d.Decor(stat) - stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str)) - bufA.WriteString(str) + bufP, bufB, bufA := s.buffers[0], s.buffers[1], s.buffers[2] + + resP := decorFiller(bufP, s.pDecorators) + resA := decorFiller(bufA, s.aDecorators) + + for _, err := range []error{resP.err, resA.err} { + if err != nil { + return nil, err + } } - if stat.AvailableWidth < 1 { - trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufA.String()), tw, "…")) + + if resP.truncate { + trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufP.String()), resP.width, "…")) + bufP.Reset() bufA.Reset() - return io.MultiReader(bufP, bufB, trunc, nlr) + return trunc, nil } - s.filler.Fill(bufB, s.reqWidth, stat) - - return io.MultiReader(bufP, bufB, bufA, nlr) -} + if resA.truncate { + trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufA.String()), resA.width, "…")) + bufA.Reset() + return io.MultiReader(bufP, 
trunc), nil + } -func (s *bState) wSyncTable() [][]chan int { - columns := make([]chan int, 0, len(s.pDecorators)+len(s.aDecorators)) - var pCount int - for _, d := range s.pDecorators { - if ch, ok := d.Sync(); ok { - columns = append(columns, ch) - pCount++ + if !s.trimSpace && stat.AvailableWidth >= 2 { + stat.AvailableWidth -= 2 + writeFiller := func(buf *bytes.Buffer) error { + return s.filler.Fill(buf, stat) } - } - var aCount int - for _, d := range s.aDecorators { - if ch, ok := d.Sync(); ok { - columns = append(columns, ch) - aCount++ + for _, fn := range []func(*bytes.Buffer) error{ + writeSpace, + writeFiller, + writeSpace, + } { + if err := fn(bufB); err != nil { + return nil, err + } + } + } else { + err := s.filler.Fill(bufB, stat) + if err != nil { + return nil, err } } - table := make([][]chan int, 2) - table[0] = columns[0:pCount] - table[1] = columns[pCount : pCount+aCount : pCount+aCount] - return table + + return io.MultiReader(bufP, bufB, bufA), nil } -func (s *bState) subscribeDecorators() { - for _, decorators := range [...][]decor.Decorator{ +func (s *bState) wSyncTable() (table syncTable) { + var count int + var row []chan int + + for i, decorators := range [][]decor.Decorator{ s.pDecorators, s.aDecorators, } { for _, d := range decorators { - d = extractBaseDecorator(d) - if d, ok := d.(decor.AverageDecorator); ok { - s.averageDecorators = append(s.averageDecorators, d) - } - if d, ok := d.(decor.EwmaDecorator); ok { - s.ewmaDecorators = append(s.ewmaDecorators, d) - } - if d, ok := d.(decor.ShutdownListener); ok { - s.shutdownListeners = append(s.shutdownListeners, d) + if ch, ok := d.Sync(); ok { + row = append(row, ch) + count++ } } + switch i { + case 0: + table[i] = row[0:count] + default: + table[i] = row[len(table[i-1]):count] + } } + return table } -func (s bState) decoratorEwmaUpdate(dur time.Duration) { - wg := new(sync.WaitGroup) +func (s bState) decoratorEwmaUpdate(n int64, dur time.Duration) { + var wg sync.WaitGroup for i := 0; i < len(s.ewmaDecorators); i++ { switch d := s.ewmaDecorators[i]; i { case len(s.ewmaDecorators) - 1: - d.EwmaUpdate(s.lastIncrement, dur) + d.EwmaUpdate(n, dur) default: wg.Add(1) go func() { - d.EwmaUpdate(s.lastIncrement, dur) + d.EwmaUpdate(n, dur) wg.Done() }() } @@ -553,7 +606,7 @@ func (s bState) decoratorEwmaUpdate(dur time.Duration) { } func (s bState) decoratorAverageAdjust(start time.Time) { - wg := new(sync.WaitGroup) + var wg sync.WaitGroup for i := 0; i < len(s.averageDecorators); i++ { switch d := s.averageDecorators[i]; i { case len(s.averageDecorators) - 1: @@ -570,15 +623,15 @@ func (s bState) decoratorAverageAdjust(start time.Time) { } func (s bState) decoratorShutdownNotify() { - wg := new(sync.WaitGroup) + var wg sync.WaitGroup for i := 0; i < len(s.shutdownListeners); i++ { switch d := s.shutdownListeners[i]; i { case len(s.shutdownListeners) - 1: - d.Shutdown() + d.OnShutdown() default: wg.Add(1) go func() { - d.Shutdown() + d.OnShutdown() wg.Done() }() } @@ -589,6 +642,7 @@ func (s bState) decoratorShutdownNotify() { func newStatistics(tw int, s *bState) decor.Statistics { return decor.Statistics{ AvailableWidth: tw, + RequestedWidth: s.reqWidth, ID: s.id, Total: s.total, Current: s.current, @@ -598,20 +652,13 @@ func newStatistics(tw int, s *bState) decor.Statistics { } } -func extractBaseDecorator(d decor.Decorator) decor.Decorator { +func unwrap(d decor.Decorator) decor.Decorator { if d, ok := d.(decor.Wrapper); ok { - return extractBaseDecorator(d.Base()) + return unwrap(d.Unwrap()) } return d } 
-func makePanicExtender(p interface{}) extenderFunc { - pstr := fmt.Sprint(p) - return func(rows []io.Reader, _ int, stat decor.Statistics) []io.Reader { - r := io.MultiReader( - strings.NewReader(runewidth.Truncate(pstr, stat.AvailableWidth, "…")), - bytes.NewReader([]byte("\n")), - ) - return append(rows, r) - } +func writeSpace(buf *bytes.Buffer) error { + return buf.WriteByte(' ') } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler.go b/vendor/github.com/vbauerster/mpb/v8/bar_filler.go similarity index 52% rename from vendor/github.com/vbauerster/mpb/v7/bar_filler.go rename to vendor/github.com/vbauerster/mpb/v8/bar_filler.go index 81177fc2e..5ac343c7d 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_filler.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_filler.go @@ -3,17 +3,13 @@ package mpb import ( "io" - "github.com/vbauerster/mpb/v7/decor" + "github.com/vbauerster/mpb/v8/decor" ) // BarFiller interface. // Bar (without decorators) renders itself by calling BarFiller's Fill method. -// -// reqWidth is requested width set by `func WithWidth(int) ContainerOption`. -// If not set, it defaults to terminal width. -// type BarFiller interface { - Fill(w io.Writer, reqWidth int, stat decor.Statistics) + Fill(io.Writer, decor.Statistics) error } // BarFillerBuilder interface. @@ -22,17 +18,16 @@ type BarFiller interface { // BarStyle() // SpinnerStyle() // NopStyle() -// type BarFillerBuilder interface { Build() BarFiller } // BarFillerFunc is function type adapter to convert compatible function // into BarFiller interface. -type BarFillerFunc func(w io.Writer, reqWidth int, stat decor.Statistics) +type BarFillerFunc func(io.Writer, decor.Statistics) error -func (f BarFillerFunc) Fill(w io.Writer, reqWidth int, stat decor.Statistics) { - f(w, reqWidth, stat) +func (f BarFillerFunc) Fill(w io.Writer, stat decor.Statistics) error { + return f(w, stat) } // BarFillerBuilderFunc is function type adapter to convert compatible @@ -42,9 +37,3 @@ type BarFillerBuilderFunc func() BarFiller func (f BarFillerBuilderFunc) Build() BarFiller { return f() } - -// NewBarFiller constructs a BarFiller from provided BarFillerBuilder. -// Deprecated. Prefer using `*Progress.New(...)` directly. 
-func NewBarFiller(b BarFillerBuilder) BarFiller { - return b.Build() -} diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go similarity index 85% rename from vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go rename to vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go index d8bf90a4a..1db8f0b4b 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go @@ -5,8 +5,8 @@ import ( "github.com/acarl005/stripansi" "github.com/mattn/go-runewidth" - "github.com/vbauerster/mpb/v7/decor" - "github.com/vbauerster/mpb/v7/internal" + "github.com/vbauerster/mpb/v8/decor" + "github.com/vbauerster/mpb/v8/internal" ) const ( @@ -36,8 +36,8 @@ type bFiller struct { components [components]*component tip struct { count uint - onComplete *component frames []*component + onComplete *component } } @@ -148,20 +148,22 @@ func (s *barStyle) Build() BarFiller { return bf } -func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) { - width = internal.CheckRequestedWidth(width, stat.AvailableWidth) - brackets := s.components[iLbound].width + s.components[iRbound].width +func (s *bFiller) Fill(w io.Writer, stat decor.Statistics) (err error) { + width := internal.CheckRequestedWidth(stat.RequestedWidth, stat.AvailableWidth) // don't count brackets as progress - width -= brackets + width -= (s.components[iLbound].width + s.components[iRbound].width) if width < 0 { - return + return nil } - mustWrite(w, s.components[iLbound].bytes) - defer mustWrite(w, s.components[iRbound].bytes) + _, err = w.Write(s.components[iLbound].bytes) + if err != nil { + return err + } if width == 0 { - return + _, err = w.Write(s.components[iRbound].bytes) + return err } var filling [][]byte @@ -171,7 +173,7 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) { var refWidth int curWidth := int(internal.PercentageRound(stat.Total, stat.Current, uint(width))) - if stat.Current >= stat.Total { + if stat.Completed { tip = s.tip.onComplete } else { tip = s.tip.frames[s.tip.count%uint(len(s.tip.frames))] @@ -230,24 +232,28 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) { } if s.rev { - flush(w, padding, filling) - } else { - flush(w, filling, padding) + filling, padding = padding, filling + } + err = flush(w, filling, padding) + if err != nil { + return err } + _, err = w.Write(s.components[iRbound].bytes) + return err } -func flush(w io.Writer, filling, padding [][]byte) { +func flush(w io.Writer, filling, padding [][]byte) error { for i := len(filling) - 1; i >= 0; i-- { - mustWrite(w, filling[i]) + _, err := w.Write(filling[i]) + if err != nil { + return err + } } for i := 0; i < len(padding); i++ { - mustWrite(w, padding[i]) - } -} - -func mustWrite(w io.Writer, p []byte) { - _, err := w.Write(p) - if err != nil { - panic(err) + _, err := w.Write(padding[i]) + if err != nil { + return err + } } + return nil } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go b/vendor/github.com/vbauerster/mpb/v8/bar_filler_nop.go similarity index 60% rename from vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go rename to vendor/github.com/vbauerster/mpb/v8/bar_filler_nop.go index 1a7086fec..f7947f1dc 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_nop.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_filler_nop.go @@ -3,12 +3,14 @@ package mpb import ( "io" - "github.com/vbauerster/mpb/v7/decor" + 
"github.com/vbauerster/mpb/v8/decor" ) // NopStyle provides BarFillerBuilder which builds NOP BarFiller. func NopStyle() BarFillerBuilder { return BarFillerBuilderFunc(func() BarFiller { - return BarFillerFunc(func(io.Writer, int, decor.Statistics) {}) + return BarFillerFunc(func(io.Writer, decor.Statistics) error { + return nil + }) }) } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go b/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go similarity index 85% rename from vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go rename to vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go index d38525efc..a28470d6e 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go @@ -6,8 +6,8 @@ import ( "github.com/acarl005/stripansi" "github.com/mattn/go-runewidth" - "github.com/vbauerster/mpb/v7/decor" - "github.com/vbauerster/mpb/v7/internal" + "github.com/vbauerster/mpb/v8/decor" + "github.com/vbauerster/mpb/v8/internal" ) const ( @@ -63,17 +63,16 @@ func (s *spinnerStyle) Build() BarFiller { return sf } -func (s *sFiller) Fill(w io.Writer, width int, stat decor.Statistics) { - width = internal.CheckRequestedWidth(width, stat.AvailableWidth) +func (s *sFiller) Fill(w io.Writer, stat decor.Statistics) (err error) { + width := internal.CheckRequestedWidth(stat.RequestedWidth, stat.AvailableWidth) frame := s.frames[s.count%uint(len(s.frames))] frameWidth := runewidth.StringWidth(stripansi.Strip(frame)) if width < frameWidth { - return + return nil } - var err error rest := width - frameWidth switch s.position { case positionLeft: @@ -84,8 +83,6 @@ func (s *sFiller) Fill(w io.Writer, width int, stat decor.Statistics) { str := strings.Repeat(" ", rest/2) + frame + strings.Repeat(" ", rest/2+rest%2) _, err = io.WriteString(w, str) } - if err != nil { - panic(err) - } s.count++ + return err } diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_option.go b/vendor/github.com/vbauerster/mpb/v8/bar_option.go similarity index 59% rename from vendor/github.com/vbauerster/mpb/v7/bar_option.go rename to vendor/github.com/vbauerster/mpb/v8/bar_option.go index 3506ed2f1..024f2e108 100644 --- a/vendor/github.com/vbauerster/mpb/v7/bar_option.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_option.go @@ -4,44 +4,40 @@ import ( "bytes" "io" - "github.com/vbauerster/mpb/v7/decor" + "github.com/vbauerster/mpb/v8/decor" ) // BarOption is a func option to alter default behavior of a bar. type BarOption func(*bState) -func skipNil(decorators []decor.Decorator) (filtered []decor.Decorator) { - for _, d := range decorators { - if d != nil { - filtered = append(filtered, d) - } - } - return -} - -func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Decorator) { - type mergeWrapper interface { - MergeUnwrap() []decor.Decorator - } +func inspect(decorators []decor.Decorator) (dest []decor.Decorator) { for _, decorator := range decorators { - if mw, ok := decorator.(mergeWrapper); ok { - *dest = append(*dest, mw.MergeUnwrap()...) + if decorator == nil { + continue } - *dest = append(*dest, decorator) + if d, ok := decorator.(interface { + PlaceHolders() []decor.Decorator + }); ok { + dest = append(dest, d.PlaceHolders()...) + } + dest = append(dest, decorator) } + return } // AppendDecorators let you inject decorators to the bar's right side. 
func AppendDecorators(decorators ...decor.Decorator) BarOption { + decorators = inspect(decorators) return func(s *bState) { - s.addDecorators(&s.aDecorators, skipNil(decorators)...) + s.aDecorators = decorators } } // PrependDecorators let you inject decorators to the bar's left side. func PrependDecorators(decorators ...decor.Decorator) BarOption { + decorators = inspect(decorators) return func(s *bState) { - s.addDecorators(&s.pDecorators, skipNil(decorators)...) + s.pDecorators = decorators } } @@ -62,15 +58,9 @@ func BarWidth(width int) BarOption { // BarQueueAfter puts this (being constructed) bar into the queue. // BarPriority will be inherited from the argument bar. // When argument bar completes or aborts queued bar replaces its place. -// If sync is true queued bar is suspended until argument bar completes -// or aborts. -func BarQueueAfter(bar *Bar, sync bool) BarOption { - if bar == nil { - return nil - } +func BarQueueAfter(bar *Bar) BarOption { return func(s *bState) { - s.wait.bar = bar - s.wait.sync = sync + s.waitBar = bar } } @@ -78,7 +68,7 @@ func BarQueueAfter(bar *Bar, sync bool) BarOption { // on complete event. func BarRemoveOnComplete() BarOption { return func(s *bState) { - s.dropOnComplete = true + s.rmOnComplete = true } } @@ -91,15 +81,12 @@ func BarFillerClearOnComplete() BarOption { // BarFillerOnComplete replaces bar's filler with message, on complete event. func BarFillerOnComplete(message string) BarOption { return BarFillerMiddleware(func(base BarFiller) BarFiller { - return BarFillerFunc(func(w io.Writer, reqWidth int, st decor.Statistics) { + return BarFillerFunc(func(w io.Writer, st decor.Statistics) error { if st.Completed { _, err := io.WriteString(w, message) - if err != nil { - panic(err) - } - } else { - base.Fill(w, reqWidth, st) + return err } + return base.Fill(w, st) }) }) } @@ -107,7 +94,10 @@ func BarFillerOnComplete(message string) BarOption { // BarFillerMiddleware provides a way to augment the underlying BarFiller. func BarFillerMiddleware(middle func(BarFiller) BarFiller) BarOption { return func(s *bState) { - s.middleware = middle + if middle == nil { + return + } + s.filler = middle(s.filler) } } @@ -121,32 +111,26 @@ func BarPriority(priority int) BarOption { // BarExtender extends bar with arbitrary lines. Provided BarFiller will be // called at each render/flush cycle. Any lines written to the underlying -// io.Writer will be printed after the bar itself. -func BarExtender(filler BarFiller) BarOption { - return barExtender(filler, false) -} - -// BarExtenderRev extends bar with arbitrary lines in reverse order. Provided -// BarFiller will be called at each render/flush cycle. Any lines written -// to the underlying io.Writer will be printed before the bar itself. -func BarExtenderRev(filler BarFiller) BarOption { - return barExtender(filler, true) -} - -func barExtender(filler BarFiller, rev bool) BarOption { +// io.Writer will extend the bar either in above (rev = true) or below +// (rev = false) direction. 
+func BarExtender(filler BarFiller, rev bool) BarOption { if filler == nil { return nil } + fn := makeExtenderFunc(filler, rev) return func(s *bState) { - s.extender = makeExtenderFunc(filler, rev) + s.extender = fn } } func makeExtenderFunc(filler BarFiller, rev bool) extenderFunc { buf := new(bytes.Buffer) - base := func(rows []io.Reader, width int, stat decor.Statistics) []io.Reader { - buf.Reset() - filler.Fill(buf, width, stat) + base := func(rows []io.Reader, stat decor.Statistics) ([]io.Reader, error) { + err := filler.Fill(buf, stat) + if err != nil { + buf.Reset() + return rows, err + } for { b, err := buf.ReadBytes('\n') if err != nil { @@ -154,19 +138,22 @@ func makeExtenderFunc(filler BarFiller, rev bool) extenderFunc { } rows = append(rows, bytes.NewReader(b)) } - return rows + buf.Reset() + return rows, err } if !rev { return base - } else { - return func(rows []io.Reader, width int, stat decor.Statistics) []io.Reader { - rows = base(rows, width, stat) - for left, right := 0, len(rows)-1; left < right; left, right = left+1, right-1 { - rows[left], rows[right] = rows[right], rows[left] - } - return rows + } + return func(rows []io.Reader, stat decor.Statistics) ([]io.Reader, error) { + rows, err := base(rows, stat) + if err != nil { + return rows, err } + for left, right := 0, len(rows)-1; left < right; left, right = left+1, right-1 { + rows[left], rows[right] = rows[right], rows[left] + } + return rows, err } } @@ -185,7 +172,7 @@ func BarNoPop() BarOption { } } -// BarOptional will invoke provided option only when cond is true. +// BarOptional will return provided option only when cond is true. func BarOptional(option BarOption, cond bool) BarOption { if cond { return option @@ -193,11 +180,26 @@ func BarOptional(option BarOption, cond bool) BarOption { return nil } -// BarOptOn will invoke provided option only when higher order predicate -// evaluates to true. +// BarOptOn will return provided option only when predicate evaluates to true. func BarOptOn(option BarOption, predicate func() bool) BarOption { if predicate() { return option } return nil } + +// BarFuncOptional will call option and return its value only when cond is true. +func BarFuncOptional(option func() BarOption, cond bool) BarOption { + if cond { + return option() + } + return nil +} + +// BarFuncOptOn will call option and return its value only when predicate evaluates to true. +func BarFuncOptOn(option func() BarOption, predicate func() bool) BarOption { + if predicate() { + return option() + } + return nil +} diff --git a/vendor/github.com/vbauerster/mpb/v7/container_option.go b/vendor/github.com/vbauerster/mpb/v8/container_option.go similarity index 60% rename from vendor/github.com/vbauerster/mpb/v7/container_option.go rename to vendor/github.com/vbauerster/mpb/v8/container_option.go index 38239d4fa..f2ab01ee0 100644 --- a/vendor/github.com/vbauerster/mpb/v7/container_option.go +++ b/vendor/github.com/vbauerster/mpb/v8/container_option.go @@ -33,7 +33,7 @@ func WithWidth(width int) ContainerOption { // WithRefreshRate overrides default 150ms refresh rate. func WithRefreshRate(d time.Duration) ContainerOption { return func(s *pState) { - s.rr = d + s.refreshRate = d } } @@ -41,7 +41,7 @@ func WithRefreshRate(d time.Duration) ContainerOption { // Refresh will occur upon receive value from provided ch. 
func WithManualRefresh(ch <-chan interface{}) ContainerOption { return func(s *pState) { - s.externalRefresh = ch + s.manualRC = ch } } @@ -51,32 +51,26 @@ func WithManualRefresh(ch <-chan interface{}) ContainerOption { // rendering will start as soon as provided chan is closed. func WithRenderDelay(ch <-chan struct{}) ContainerOption { return func(s *pState) { - s.renderDelay = ch + s.delayRC = ch } } -// WithShutdownNotifier provided chanel will be closed, after all bars -// have been rendered. -func WithShutdownNotifier(ch chan struct{}) ContainerOption { +// WithShutdownNotifier value of type `[]*mpb.Bar` will be send into provided +// channel upon container shutdown. +func WithShutdownNotifier(ch chan<- interface{}) ContainerOption { return func(s *pState) { - select { - case <-ch: - default: - s.shutdownNotifier = ch - } + s.shutdownNotifier = ch } } -// WithOutput overrides default os.Stdout output. Setting it to nil -// will effectively disable auto refresh rate and discard any output, -// useful if you want to disable progress bars with little overhead. +// WithOutput overrides default os.Stdout output. If underlying io.Writer +// is not a terminal then auto refresh is disabled unless WithAutoRefresh +// option is set. func WithOutput(w io.Writer) ContainerOption { + if w == nil { + w = io.Discard + } return func(s *pState) { - if w == nil { - s.output = io.Discard - s.outputDiscarded = true - return - } s.output = w } } @@ -84,21 +78,31 @@ func WithOutput(w io.Writer) ContainerOption { // WithDebugOutput sets debug output. func WithDebugOutput(w io.Writer) ContainerOption { if w == nil { - return nil + w = io.Discard } return func(s *pState) { s.debugOut = w } } -// PopCompletedMode will pop and stop rendering completed bars. +// WithAutoRefresh force auto refresh regardless of what output is set to. +// Applicable only if not WithManualRefresh set. +func WithAutoRefresh() ContainerOption { + return func(s *pState) { + s.autoRefresh = true + } +} + +// PopCompletedMode will pop completed bars to the top. +// To stop rendering bar after it has been popped, use +// mpb.BarRemoveOnComplete() option on that bar. func PopCompletedMode() ContainerOption { return func(s *pState) { s.popCompleted = true } } -// ContainerOptional will invoke provided option only when cond is true. +// ContainerOptional will return provided option only when cond is true. func ContainerOptional(option ContainerOption, cond bool) ContainerOption { if cond { return option @@ -106,11 +110,26 @@ func ContainerOptional(option ContainerOption, cond bool) ContainerOption { return nil } -// ContainerOptOn will invoke provided option only when higher order -// predicate evaluates to true. +// ContainerOptOn will return provided option only when predicate evaluates to true. func ContainerOptOn(option ContainerOption, predicate func() bool) ContainerOption { if predicate() { return option } return nil } + +// ContainerFuncOptional will call option and return its value only when cond is true. +func ContainerFuncOptional(option func() ContainerOption, cond bool) ContainerOption { + if cond { + return option() + } + return nil +} + +// ContainerFuncOptOn will call option and return its value only when predicate evaluates to true. 
+func ContainerFuncOptOn(option func() ContainerOption, predicate func() bool) ContainerOption { + if predicate() { + return option() + } + return nil +} diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/doc.go b/vendor/github.com/vbauerster/mpb/v8/cwriter/doc.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v7/cwriter/doc.go rename to vendor/github.com/vbauerster/mpb/v8/cwriter/doc.go diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/util_bsd.go b/vendor/github.com/vbauerster/mpb/v8/cwriter/util_bsd.go similarity index 58% rename from vendor/github.com/vbauerster/mpb/v7/cwriter/util_bsd.go rename to vendor/github.com/vbauerster/mpb/v8/cwriter/util_bsd.go index 4e3564ece..215643b45 100644 --- a/vendor/github.com/vbauerster/mpb/v7/cwriter/util_bsd.go +++ b/vendor/github.com/vbauerster/mpb/v8/cwriter/util_bsd.go @@ -1,4 +1,4 @@ -// +build darwin dragonfly freebsd netbsd openbsd +//go:build darwin || dragonfly || freebsd || netbsd || openbsd package cwriter diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/util_linux.go b/vendor/github.com/vbauerster/mpb/v8/cwriter/util_linux.go similarity index 78% rename from vendor/github.com/vbauerster/mpb/v7/cwriter/util_linux.go rename to vendor/github.com/vbauerster/mpb/v8/cwriter/util_linux.go index 253f12dd2..7d0e76123 100644 --- a/vendor/github.com/vbauerster/mpb/v7/cwriter/util_linux.go +++ b/vendor/github.com/vbauerster/mpb/v8/cwriter/util_linux.go @@ -1,4 +1,4 @@ -// +build aix linux +//go:build aix || linux package cwriter diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/util_solaris.go b/vendor/github.com/vbauerster/mpb/v8/cwriter/util_solaris.go similarity index 82% rename from vendor/github.com/vbauerster/mpb/v7/cwriter/util_solaris.go rename to vendor/github.com/vbauerster/mpb/v8/cwriter/util_solaris.go index 4b29ff5c0..981f574f4 100644 --- a/vendor/github.com/vbauerster/mpb/v7/cwriter/util_solaris.go +++ b/vendor/github.com/vbauerster/mpb/v8/cwriter/util_solaris.go @@ -1,4 +1,4 @@ -// +build solaris +//go:build solaris package cwriter diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/util_zos.go b/vendor/github.com/vbauerster/mpb/v8/cwriter/util_zos.go similarity index 85% rename from vendor/github.com/vbauerster/mpb/v7/cwriter/util_zos.go rename to vendor/github.com/vbauerster/mpb/v8/cwriter/util_zos.go index b7d67fc43..5daf003a3 100644 --- a/vendor/github.com/vbauerster/mpb/v7/cwriter/util_zos.go +++ b/vendor/github.com/vbauerster/mpb/v8/cwriter/util_zos.go @@ -1,4 +1,4 @@ -// +build zos +//go:build zos package cwriter diff --git a/vendor/github.com/vbauerster/mpb/v8/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/v8/cwriter/writer.go new file mode 100644 index 000000000..23a72d3ec --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v8/cwriter/writer.go @@ -0,0 +1,59 @@ +package cwriter + +import ( + "bytes" + "errors" + "io" + "os" + "strconv" +) + +// https://github.com/dylanaraps/pure-sh-bible#cursor-movement +const ( + escOpen = "\x1b[" + cuuAndEd = "A\x1b[J" +) + +// ErrNotTTY not a TeleTYpewriter error. +var ErrNotTTY = errors.New("not a terminal") + +// New returns a new Writer with defaults. 
+func New(out io.Writer) *Writer { + w := &Writer{ + Buffer: new(bytes.Buffer), + out: out, + termSize: func(_ int) (int, int, error) { + return -1, -1, ErrNotTTY + }, + } + if f, ok := out.(*os.File); ok { + w.fd = int(f.Fd()) + if IsTerminal(w.fd) { + w.terminal = true + w.termSize = func(fd int) (int, int, error) { + return GetSize(fd) + } + } + } + bb := make([]byte, 16) + w.ew = escWriter(bb[:copy(bb, []byte(escOpen))]) + return w +} + +// IsTerminal tells whether underlying io.Writer is terminal. +func (w *Writer) IsTerminal() bool { + return w.terminal +} + +// GetTermSize returns WxH of underlying terminal. +func (w *Writer) GetTermSize() (width, height int, err error) { + return w.termSize(w.fd) +} + +type escWriter []byte + +func (b escWriter) ansiCuuAndEd(out io.Writer, n int) error { + b = strconv.AppendInt(b, int64(n), 10) + _, err := out.Write(append(b, []byte(cuuAndEd)...)) + return err +} diff --git a/vendor/github.com/vbauerster/mpb/v8/cwriter/writer_posix.go b/vendor/github.com/vbauerster/mpb/v8/cwriter/writer_posix.go new file mode 100644 index 000000000..e80d757af --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v8/cwriter/writer_posix.go @@ -0,0 +1,48 @@ +//go:build !windows + +package cwriter + +import ( + "bytes" + "io" + + "golang.org/x/sys/unix" +) + +// Writer is a buffered terminal writer, which moves cursor N lines up +// on each flush except the first one, where N is a number of lines of +// a previous flush. +type Writer struct { + *bytes.Buffer + out io.Writer + ew escWriter + fd int + terminal bool + termSize func(int) (int, int, error) +} + +// Flush flushes the underlying buffer. +// It's caller's responsibility to pass correct number of lines. +func (w *Writer) Flush(lines int) error { + _, err := w.WriteTo(w.out) + // some terminals interpret 'cursor up 0' as 'cursor up 1' + if err == nil && lines > 0 { + err = w.ew.ansiCuuAndEd(w, lines) + } + return err +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return -1, -1, err + } + return int(ws.Col), int(ws.Row), nil +} + +// IsTerminal returns whether the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go b/vendor/github.com/vbauerster/mpb/v8/cwriter/writer_windows.go similarity index 70% rename from vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go rename to vendor/github.com/vbauerster/mpb/v8/cwriter/writer_windows.go index 2c4c3707b..44293f26a 100644 --- a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer_windows.go +++ b/vendor/github.com/vbauerster/mpb/v8/cwriter/writer_windows.go @@ -1,8 +1,10 @@ -// +build windows +//go:build windows package cwriter import ( + "bytes" + "io" "unsafe" "golang.org/x/sys/windows" @@ -15,10 +17,37 @@ var ( procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") ) -func (w *Writer) clearLines() error { +// Writer is a buffered terminal writer, which moves cursor N lines up +// on each flush except the first one, where N is a number of lines of +// a previous flush. +type Writer struct { + *bytes.Buffer + out io.Writer + ew escWriter + lines int + fd int + terminal bool + termSize func(int) (int, int, error) +} + +// Flush flushes the underlying buffer. +// It's caller's responsibility to pass correct number of lines. 
+func (w *Writer) Flush(lines int) error { + if w.lines > 0 { + err := w.clearLines(w.lines) + if err != nil { + return err + } + } + w.lines = lines + _, err := w.WriteTo(w.out) + return err +} + +func (w *Writer) clearLines(n int) error { if !w.terminal { // hope it's cygwin or similar - return w.ansiCuuAndEd() + return w.ew.ansiCuuAndEd(w.out, n) } var info windows.ConsoleScreenBufferInfo @@ -26,7 +55,7 @@ func (w *Writer) clearLines() error { return err } - info.CursorPosition.Y -= int16(w.lines) + info.CursorPosition.Y -= int16(n) if info.CursorPosition.Y < 0 { info.CursorPosition.Y = 0 } @@ -40,7 +69,7 @@ func (w *Writer) clearLines() error { X: info.Window.Left, Y: info.CursorPosition.Y, } - count := uint32(info.Size.X) * uint32(w.lines) + count := uint32(info.Size.X) * uint32(n) _, _, _ = procFillConsoleOutputCharacter.Call( uintptr(w.fd), uintptr(' '), @@ -52,7 +81,6 @@ func (w *Writer) clearLines() error { } // GetSize returns the visible dimensions of the given terminal. -// // These dimensions don't include any scrollback buffer height. func GetSize(fd int) (width, height int, err error) { var info windows.ConsoleScreenBufferInfo diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/any.go b/vendor/github.com/vbauerster/mpb/v8/decor/any.go similarity index 92% rename from vendor/github.com/vbauerster/mpb/v7/decor/any.go rename to vendor/github.com/vbauerster/mpb/v8/decor/any.go index 39518f594..23ad75e94 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/any.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/any.go @@ -1,12 +1,13 @@ package decor +var _ Decorator = (*any)(nil) + // Any decorator displays text, that can be changed during decorator's // lifetime via provided DecorFunc. // // `fn` DecorFunc callback // // `wcc` optional WC config -// func Any(fn DecorFunc, wcc ...WC) Decorator { return &any{initWC(wcc...), fn} } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/counters.go b/vendor/github.com/vbauerster/mpb/v8/decor/counters.go similarity index 99% rename from vendor/github.com/vbauerster/mpb/v7/decor/counters.go rename to vendor/github.com/vbauerster/mpb/v8/decor/counters.go index 4a5343d41..331c3df67 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/counters.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/counters.go @@ -42,7 +42,6 @@ func CountersKiloByte(pairFmt string, wcc ...WC) Decorator { // pairFmt="% .1f / % .1f" output: "1.0 MB / 12.0 MB" // pairFmt="%d / %d" output: "1MB / 12MB" // pairFmt="% d / % d" output: "1 MB / 12 MB" -// func Counters(unit int, pairFmt string, wcc ...WC) Decorator { producer := func(unit int, pairFmt string) DecorFunc { if pairFmt == "" { @@ -99,7 +98,6 @@ func TotalKiloByte(format string, wcc ...WC) Decorator { // format="% .1f" output: "12.0 MiB" // format="%d" output: "12MiB" // format="% d" output: "12 MiB" -// func Total(unit int, format string, wcc ...WC) Decorator { producer := func(unit int, format string) DecorFunc { if format == "" { @@ -157,7 +155,6 @@ func CurrentKiloByte(format string, wcc ...WC) Decorator { // format="% .1f" output: "12.0 MiB" // format="%d" output: "12MiB" // format="% d" output: "12 MiB" -// func Current(unit int, format string, wcc ...WC) Decorator { producer := func(unit int, format string) DecorFunc { if format == "" { @@ -215,7 +212,6 @@ func InvertedCurrentKiloByte(format string, wcc ...WC) Decorator { // format="% .1f" output: "12.0 MiB" // format="%d" output: "12MiB" // format="% d" output: "12 MiB" -// func InvertedCurrent(unit int, format string, wcc ...WC) Decorator { 
producer := func(unit int, format string) DecorFunc { if format == "" { diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go b/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go similarity index 88% rename from vendor/github.com/vbauerster/mpb/v7/decor/decorator.go rename to vendor/github.com/vbauerster/mpb/v8/decor/decorator.go index aad7709c0..9e6975712 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/decorator.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go @@ -44,10 +44,11 @@ const ( ET_STYLE_MMSS ) -// Statistics consists of progress related statistics, that Decorator -// may need. +// Statistics contains fields which are necessary for implementing +// `decor.Decorator` and `mpb.BarFiller` interfaces. type Statistics struct { - AvailableWidth int + AvailableWidth int // calculated width initially equal to terminal width + RequestedWidth int // width set by `mpb.WithWidth` ID int Total int64 Current int64 @@ -92,7 +93,7 @@ type Configurator interface { // it is necessary to implement this interface to retain functionality // of built-in Decorator. type Wrapper interface { - Base() Decorator + Unwrap() Decorator } // EwmaDecorator interface. @@ -112,7 +113,7 @@ type AverageDecorator interface { // If decorator needs to be notified once upon bar shutdown event, so // this is the right interface to implement. type ShutdownListener interface { - Shutdown() + OnShutdown() } // Global convenience instances of WC with sync width bit set. @@ -136,26 +137,27 @@ type WC struct { // FormatMsg formats final message according to WC.W and WC.C. // Should be called by any Decorator implementation. -func (wc *WC) FormatMsg(msg string) string { +func (wc WC) FormatMsg(msg string) string { pureWidth := runewidth.StringWidth(msg) - stripWidth := runewidth.StringWidth(stripansi.Strip(msg)) - maxCell := wc.W + viewWidth := runewidth.StringWidth(stripansi.Strip(msg)) + max := wc.W if (wc.C & DSyncWidth) != 0 { - cellCount := stripWidth + viewWidth := viewWidth if (wc.C & DextraSpace) != 0 { - cellCount++ + viewWidth++ } - wc.wsync <- cellCount - maxCell = <-wc.wsync + wc.wsync <- viewWidth + max = <-wc.wsync } - return wc.fill(msg, maxCell+(pureWidth-stripWidth)) + return wc.fill(msg, max-viewWidth+pureWidth) } // Init initializes width related config. func (wc *WC) Init() WC { - wc.fill = runewidth.FillLeft if (wc.C & DidentRight) != 0 { wc.fill = runewidth.FillRight + } else { + wc.fill = runewidth.FillLeft } if (wc.C & DSyncWidth) != 0 { // it's deliberate choice to override wsync on each Init() call, @@ -166,7 +168,7 @@ func (wc *WC) Init() WC { } // Sync is implementation of Synchronizer interface. -func (wc *WC) Sync() (chan int, bool) { +func (wc WC) Sync() (chan int, bool) { if (wc.C&DSyncWidth) != 0 && wc.wsync == nil { panic(fmt.Sprintf("%T is not initialized", wc)) } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/doc.go b/vendor/github.com/vbauerster/mpb/v8/decor/doc.go similarity index 75% rename from vendor/github.com/vbauerster/mpb/v7/decor/doc.go rename to vendor/github.com/vbauerster/mpb/v8/decor/doc.go index 4e4299307..d41aa5061 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/doc.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/doc.go @@ -1,4 +1,4 @@ -// Package decor provides common decorators for "github.com/vbauerster/mpb/v7" module. +// Package decor provides common decorators for "github.com/vbauerster/mpb/v8" module. // // Some decorators returned by this package might have a closure state. 
It is ok to use // decorators concurrently, unless you share the same decorator among multiple @@ -6,10 +6,10 @@ // // Don't: // -// p := mpb.New() -// name := decor.Name("bar") -// p.AddBar(100, mpb.AppendDecorators(name)) -// p.AddBar(100, mpb.AppendDecorators(name)) +// p := mpb.New() +// name := decor.Name("bar") +// p.AddBar(100, mpb.AppendDecorators(name)) +// p.AddBar(100, mpb.AppendDecorators(name)) // // Do: // diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/elapsed.go b/vendor/github.com/vbauerster/mpb/v8/decor/elapsed.go similarity index 99% rename from vendor/github.com/vbauerster/mpb/v7/decor/elapsed.go rename to vendor/github.com/vbauerster/mpb/v8/decor/elapsed.go index e389f1581..6cee7d1d4 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/elapsed.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/elapsed.go @@ -9,7 +9,6 @@ import ( // `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] // // `wcc` optional WC config -// func Elapsed(style TimeStyle, wcc ...WC) Decorator { return NewElapsed(style, time.Now(), wcc...) } @@ -21,7 +20,6 @@ func Elapsed(style TimeStyle, wcc ...WC) Decorator { // `startTime` start time // // `wcc` optional WC config -// func NewElapsed(style TimeStyle, startTime time.Time, wcc ...WC) Decorator { var msg string producer := chooseTimeProducer(style) diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/eta.go b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go similarity index 94% rename from vendor/github.com/vbauerster/mpb/v7/decor/eta.go rename to vendor/github.com/vbauerster/mpb/v8/decor/eta.go index d03caa735..3594e0185 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/eta.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go @@ -8,6 +8,13 @@ import ( "github.com/VividCortex/ewma" ) +var ( + _ Decorator = (*movingAverageETA)(nil) + _ EwmaDecorator = (*movingAverageETA)(nil) + _ Decorator = (*averageETA)(nil) + _ AverageDecorator = (*averageETA)(nil) +) + // TimeNormalizer interface. Implementors could be passed into // MovingAverageETA, in order to affect i.e. normalize its output. type TimeNormalizer interface { @@ -22,10 +29,9 @@ func (f TimeNormalizerFunc) Normalize(src time.Duration) time.Duration { return f(src) } -// EwmaETA exponential-weighted-moving-average based ETA decorator. -// For this decorator to work correctly you have to measure each -// iteration's duration and pass it to the -// *Bar.DecoratorEwmaUpdate(time.Duration) method after each increment. +// EwmaETA exponential-weighted-moving-average based ETA decorator. For this +// decorator to work correctly you have to measure each iteration's duration +// and pass it to one of the (*Bar).EwmaIncr... family methods. func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator { var average ewma.MovingAverage if age == 0 { @@ -45,7 +51,6 @@ func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator { // `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer] // // `wcc` optional WC config -// func MovingAverageETA(style TimeStyle, average ewma.MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator { d := &movingAverageETA{ WC: initWC(wcc...), @@ -85,7 +90,6 @@ func (d *movingAverageETA) EwmaUpdate(n int64, dur time.Duration) { // `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] // // `wcc` optional WC config -// func AverageETA(style TimeStyle, wcc ...WC) Decorator { return NewAverageETA(style, time.Now(), nil, wcc...) 
} @@ -99,7 +103,6 @@ func AverageETA(style TimeStyle, wcc ...WC) Decorator { // `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer] // // `wcc` optional WC config -// func NewAverageETA(style TimeStyle, startTime time.Time, normalizer TimeNormalizer, wcc ...WC) Decorator { d := &averageETA{ WC: initWC(wcc...), diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/merge.go b/vendor/github.com/vbauerster/mpb/v8/decor/merge.go similarity index 70% rename from vendor/github.com/vbauerster/mpb/v7/decor/merge.go rename to vendor/github.com/vbauerster/mpb/v8/decor/merge.go index 847671155..02b7a9308 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/merge.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/merge.go @@ -7,15 +7,20 @@ import ( "github.com/mattn/go-runewidth" ) +var ( + _ Decorator = (*mergeDecorator)(nil) + _ Wrapper = (*mergeDecorator)(nil) + _ Decorator = (*placeHolderDecorator)(nil) +) + // Merge wraps its decorator argument with intention to sync width // with several decorators of another bar. Visual example: // -// +----+--------+---------+--------+ -// | B1 | MERGE(D, P1, Pn) | -// +----+--------+---------+--------+ -// | B2 | D0 | D1 | Dn | -// +----+--------+---------+--------+ -// +// +----+--------+---------+--------+ +// | B1 | MERGE(D, P1, Pn) | +// +----+--------+---------+--------+ +// | B2 | D0 | D1 | Dn | +// +----+--------+---------+--------+ func Merge(decorator Decorator, placeholders ...WC) Decorator { if decorator == nil { return nil @@ -26,7 +31,7 @@ func Merge(decorator Decorator, placeholders ...WC) Decorator { md := &mergeDecorator{ Decorator: decorator, wc: decorator.GetConf(), - placeHolders: make([]*placeHolderDecorator, len(placeholders)), + placeHolders: make([]Decorator, len(placeholders)), } decorator.SetConf(WC{}) for i, wc := range placeholders { @@ -41,7 +46,7 @@ func Merge(decorator Decorator, placeholders ...WC) Decorator { type mergeDecorator struct { Decorator wc WC - placeHolders []*placeHolderDecorator + placeHolders []Decorator } func (d *mergeDecorator) GetConf() WC { @@ -52,19 +57,15 @@ func (d *mergeDecorator) SetConf(conf WC) { d.wc = conf.Init() } -func (d *mergeDecorator) MergeUnwrap() []Decorator { - decorators := make([]Decorator, len(d.placeHolders)) - for i, ph := range d.placeHolders { - decorators[i] = ph - } - return decorators +func (d *mergeDecorator) PlaceHolders() []Decorator { + return d.placeHolders } func (d *mergeDecorator) Sync() (chan int, bool) { return d.wc.Sync() } -func (d *mergeDecorator) Base() Decorator { +func (d *mergeDecorator) Unwrap() Decorator { return d.Decorator } @@ -77,21 +78,21 @@ func (d *mergeDecorator) Decor(s Statistics) string { cellCount++ } - total := runewidth.StringWidth(d.placeHolders[0].FormatMsg("")) + total := runewidth.StringWidth(d.placeHolders[0].GetConf().FormatMsg("")) pw := (cellCount - total) / len(d.placeHolders) rem := (cellCount - total) % len(d.placeHolders) var diff int for i := 1; i < len(d.placeHolders); i++ { - ph := d.placeHolders[i] + wc := d.placeHolders[i].GetConf() width := pw - diff - if (ph.WC.C & DextraSpace) != 0 { + if (wc.C & DextraSpace) != 0 { width-- if width < 0 { width = 0 } } - max := runewidth.StringWidth(ph.FormatMsg(strings.Repeat(" ", width))) + max := runewidth.StringWidth(wc.FormatMsg(strings.Repeat(" ", width))) total += max diff = max - pw } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/moving_average.go b/vendor/github.com/vbauerster/mpb/v8/decor/moving_average.go similarity index 90% 
rename from vendor/github.com/vbauerster/mpb/v7/decor/moving_average.go rename to vendor/github.com/vbauerster/mpb/v8/decor/moving_average.go index 50ac9c393..a1be8ada2 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/moving_average.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/moving_average.go @@ -7,6 +7,12 @@ import ( "github.com/VividCortex/ewma" ) +var ( + _ ewma.MovingAverage = (*threadSafeMovingAverage)(nil) + _ ewma.MovingAverage = (*medianWindow)(nil) + _ sort.Interface = (*medianWindow)(nil) +) + type threadSafeMovingAverage struct { ewma.MovingAverage mu sync.Mutex diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/name.go b/vendor/github.com/vbauerster/mpb/v8/decor/name.go similarity index 98% rename from vendor/github.com/vbauerster/mpb/v7/decor/name.go rename to vendor/github.com/vbauerster/mpb/v8/decor/name.go index 3af311254..31ac123b5 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/name.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/name.go @@ -6,7 +6,6 @@ package decor // `str` string to display // // `wcc` optional WC config -// func Name(str string, wcc ...WC) Decorator { return Any(func(Statistics) string { return str }, wcc...) } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go b/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go similarity index 86% rename from vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go rename to vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go index 10ff67009..862ae33d2 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/on_abort.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go @@ -1,5 +1,10 @@ package decor +var ( + _ Decorator = (*onAbortWrapper)(nil) + _ Wrapper = (*onAbortWrapper)(nil) +) + // OnAbort returns decorator, which wraps provided decorator with sole // purpose to display provided message on abort event. It has no effect // if bar.Abort(drop bool) is called with true argument. @@ -7,7 +12,6 @@ package decor // `decorator` Decorator to wrap // // `message` message to display on abort event -// func OnAbort(decorator Decorator, message string) Decorator { if decorator == nil { return nil @@ -36,6 +40,6 @@ func (d *onAbortWrapper) Decor(s Statistics) string { return d.Decorator.Decor(s) } -func (d *onAbortWrapper) Base() Decorator { +func (d *onAbortWrapper) Unwrap() Decorator { return d.Decorator } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go b/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go similarity index 84% rename from vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go rename to vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go index 2ada2b31b..6ee926844 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/on_complete.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go @@ -1,12 +1,16 @@ package decor +var ( + _ Decorator = (*onCompleteWrapper)(nil) + _ Wrapper = (*onCompleteWrapper)(nil) +) + // OnComplete returns decorator, which wraps provided decorator with // sole purpose to display provided message on complete event. 
// // `decorator` Decorator to wrap // // `message` message to display on complete event -// func OnComplete(decorator Decorator, message string) Decorator { if decorator == nil { return nil @@ -35,6 +39,6 @@ func (d *onCompleteWrapper) Decor(s Statistics) string { return d.Decorator.Decor(s) } -func (d *onCompleteWrapper) Base() Decorator { +func (d *onCompleteWrapper) Unwrap() Decorator { return d.Decorator } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go b/vendor/github.com/vbauerster/mpb/v8/decor/on_condition.go similarity index 98% rename from vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go rename to vendor/github.com/vbauerster/mpb/v8/decor/on_condition.go index 74a3d9667..f4626c33b 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/on_condition.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/on_condition.go @@ -5,7 +5,6 @@ package decor // `decorator` Decorator // // `cond` bool -// func OnCondition(decorator Decorator, cond bool) Decorator { return Conditional(cond, decorator, nil) } @@ -15,7 +14,6 @@ func OnCondition(decorator Decorator, cond bool) Decorator { // `decorator` Decorator // // `predicate` func() bool -// func OnPredicate(decorator Decorator, predicate func() bool) Decorator { return Predicative(predicate, decorator, nil) } @@ -28,7 +26,6 @@ func OnPredicate(decorator Decorator, predicate func() bool) Decorator { // `a` Decorator // // `b` Decorator -// func Conditional(cond bool, a, b Decorator) Decorator { if cond { return a @@ -45,7 +42,6 @@ func Conditional(cond bool, a, b Decorator) Decorator { // `a` Decorator // // `b` Decorator -// func Predicative(predicate func() bool, a, b Decorator) Decorator { if predicate() { return a diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go similarity index 78% rename from vendor/github.com/vbauerster/mpb/v7/decor/percentage.go rename to vendor/github.com/vbauerster/mpb/v8/decor/percentage.go index e72668993..1d3b3a9e0 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/percentage.go @@ -4,7 +4,7 @@ import ( "fmt" "strconv" - "github.com/vbauerster/mpb/v7/internal" + "github.com/vbauerster/mpb/v8/internal" ) type percentageType float64 @@ -23,11 +23,18 @@ func (s percentageType) Format(st fmt.State, verb rune) { } } - mustWriteString(st, strconv.FormatFloat(float64(s), 'f', prec, 64)) + p := bytesPool.Get().(*[]byte) + b := strconv.AppendFloat(*p, float64(s), 'f', prec, 64) if st.Flag(' ') { - mustWriteString(st, " ") + b = append(b, ' ', '%') + } else { + b = append(b, '%') } - mustWriteString(st, "%") + _, err := st.Write(b) + if err != nil { + panic(err) + } + bytesPool.Put(p) } // Percentage returns percentage decorator. It's a wrapper of NewPercentage. 
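[Illustrative sketch, not part of the patch.] Driving the vendored v8 percentage decorator end to end; every identifier below appears in this vendor drop, and the sleep loop is a stand-in for real work:

package main

import (
	"time"

	"github.com/vbauerster/mpb/v8"
	"github.com/vbauerster/mpb/v8/decor"
)

func main() {
	p := mpb.New(mpb.WithWidth(64))
	// "% .1f" renders e.g. "42.0 %"; see the format examples in the next hunk.
	bar := p.New(100, mpb.BarStyle(),
		mpb.AppendDecorators(decor.NewPercentage("% .1f", decor.WCSyncSpace)),
	)
	for i := 0; i < 100; i++ {
		time.Sleep(5 * time.Millisecond) // stand-in for real work
		bar.Increment()
	}
	p.Wait()
}
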
@@ -43,7 +50,6 @@ func Percentage(wcc ...WC) Decorator { // format="% .1f" output: "1.0 %" // format="%d" output: "1%" // format="% d" output: "1 %" -// func NewPercentage(format string, wcc ...WC) Decorator { if format == "" { format = "% d" diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/pool.go b/vendor/github.com/vbauerster/mpb/v8/decor/pool.go new file mode 100644 index 000000000..cefa9bfac --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v8/decor/pool.go @@ -0,0 +1,10 @@ +package decor + +import "sync" + +var bytesPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 0, 32) + return &b + }, +} diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go b/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go similarity index 77% rename from vendor/github.com/vbauerster/mpb/v7/decor/size_type.go rename to vendor/github.com/vbauerster/mpb/v8/decor/size_type.go index 09ecc23f8..4df27095f 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go @@ -49,11 +49,17 @@ func (self SizeB1024) Format(st fmt.State, verb rune) { unit = _iTiB } - mustWriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) + p := bytesPool.Get().(*[]byte) + b := strconv.AppendFloat(*p, float64(self)/float64(unit), 'f', prec, 64) if st.Flag(' ') { - mustWriteString(st, " ") + b = append(b, ' ') } - mustWriteString(st, unit.String()) + b = append(b, []byte(unit.String())...) + _, err := st.Write(b) + if err != nil { + panic(err) + } + bytesPool.Put(p) } const ( @@ -97,9 +103,15 @@ func (self SizeB1000) Format(st fmt.State, verb rune) { unit = _TB } - mustWriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64)) + p := bytesPool.Get().(*[]byte) + b := strconv.AppendFloat(*p, float64(self)/float64(unit), 'f', prec, 64) if st.Flag(' ') { - mustWriteString(st, " ") + b = append(b, ' ') + } + b = append(b, []byte(unit.String())...) + _, err := st.Write(b) + if err != nil { + panic(err) } - mustWriteString(st, unit.String()) + bytesPool.Put(p) } diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/sizeb1000_string.go b/vendor/github.com/vbauerster/mpb/v8/decor/sizeb1000_string.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v7/decor/sizeb1000_string.go rename to vendor/github.com/vbauerster/mpb/v8/decor/sizeb1000_string.go diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/sizeb1024_string.go b/vendor/github.com/vbauerster/mpb/v8/decor/sizeb1024_string.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v7/decor/sizeb1024_string.go rename to vendor/github.com/vbauerster/mpb/v8/decor/sizeb1024_string.go diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/speed.go b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go similarity index 90% rename from vendor/github.com/vbauerster/mpb/v7/decor/speed.go rename to vendor/github.com/vbauerster/mpb/v8/decor/speed.go index f052352fc..dd0ad7001 100644 --- a/vendor/github.com/vbauerster/mpb/v7/decor/speed.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go @@ -2,34 +2,43 @@ package decor import ( "fmt" + "io" "math" "time" "github.com/VividCortex/ewma" ) +var ( + _ Decorator = (*movingAverageSpeed)(nil) + _ EwmaDecorator = (*movingAverageSpeed)(nil) + _ Decorator = (*averageSpeed)(nil) + _ AverageDecorator = (*averageSpeed)(nil) +) + // FmtAsSpeed adds "/s" to the end of the input formatter. 
To be // used with SizeB1000 or SizeB1024 types, for example: // // fmt.Printf("%.1f", FmtAsSpeed(SizeB1024(2048))) -// func FmtAsSpeed(input fmt.Formatter) fmt.Formatter { - return &speedFormatter{input} + return speedFormatter{input} } type speedFormatter struct { fmt.Formatter } -func (self *speedFormatter) Format(st fmt.State, verb rune) { +func (self speedFormatter) Format(st fmt.State, verb rune) { self.Formatter.Format(st, verb) - mustWriteString(st, "/s") + _, err := io.WriteString(st, "/s") + if err != nil { + panic(err) + } } // EwmaSpeed exponential-weighted-moving-average based speed decorator. -// For this decorator to work correctly you have to measure each -// iteration's duration and pass it to the -// *Bar.DecoratorEwmaUpdate(time.Duration) method after each increment. +// For this decorator to work correctly you have to measure each iteration's +// duration and pass it to one of the (*Bar).EwmaIncr... family methods. func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator { var average ewma.MovingAverage if age == 0 { @@ -57,7 +66,6 @@ func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator { // unit=UnitKiB, format="% .1f" output: "1.0 MiB/s" // unit=UnitKB, format="%.1f" output: "1.0MB/s" // unit=UnitKB, format="% .1f" output: "1.0 MB/s" -// func MovingAverageSpeed(unit int, format string, average ewma.MovingAverage, wcc ...WC) Decorator { if format == "" { format = "%.0f" @@ -119,7 +127,6 @@ func AverageSpeed(unit int, format string, wcc ...WC) Decorator { // unit=UnitKiB, format="% .1f" output: "1.0 MiB/s" // unit=UnitKB, format="%.1f" output: "1.0MB/s" // unit=UnitKB, format="% .1f" output: "1.0 MB/s" -// func NewAverageSpeed(unit int, format string, startTime time.Time, wcc ...WC) Decorator { if format == "" { format = "%.0f" diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/spinner.go b/vendor/github.com/vbauerster/mpb/v8/decor/spinner.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v7/decor/spinner.go rename to vendor/github.com/vbauerster/mpb/v8/decor/spinner.go diff --git a/vendor/github.com/vbauerster/mpb/v7/doc.go b/vendor/github.com/vbauerster/mpb/v8/doc.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v7/doc.go rename to vendor/github.com/vbauerster/mpb/v8/doc.go diff --git a/vendor/github.com/vbauerster/mpb/v8/heap_manager.go b/vendor/github.com/vbauerster/mpb/v8/heap_manager.go new file mode 100644 index 000000000..678dd7c9f --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v8/heap_manager.go @@ -0,0 +1,171 @@ +package mpb + +import ( + "container/heap" +) + +type heapManager chan heapRequest + +type heapCmd int + +const ( + h_sync heapCmd = iota + h_push + h_iter + h_drain + h_fix + h_state + h_end +) + +type heapRequest struct { + cmd heapCmd + data interface{} +} + +type iterData struct { + iter chan<- *Bar + drop <-chan struct{} +} + +type pushData struct { + bar *Bar + sync bool +} + +type fixData struct { + bar *Bar + priority int +} + +func (m heapManager) run() { + var bHeap priorityQueue + var pMatrix, aMatrix map[int][]chan int + + var l int + var sync bool + + for req := range m { + switch req.cmd { + case h_push: + data := req.data.(pushData) + heap.Push(&bHeap, data.bar) + if !sync { + sync = data.sync + } + case h_sync: + if sync || l != bHeap.Len() { + pMatrix = make(map[int][]chan int) + aMatrix = make(map[int][]chan int) + for _, b := range bHeap { + table := b.wSyncTable() + for i, ch := range table[0] { + pMatrix[i] = append(pMatrix[i], ch) + } + for 
i, ch := range table[1] { + aMatrix[i] = append(aMatrix[i], ch) + } + } + sync = false + l = bHeap.Len() + } + drop := req.data.(<-chan struct{}) + syncWidth(pMatrix, drop) + syncWidth(aMatrix, drop) + case h_iter: + data := req.data.(iterData) + for _, b := range bHeap { + select { + case data.iter <- b: + case <-data.drop: + break + } + } + close(data.iter) + case h_drain: + data := req.data.(iterData) + for bHeap.Len() != 0 { + select { + case data.iter <- heap.Pop(&bHeap).(*Bar): + case <-data.drop: + break + } + } + close(data.iter) + case h_fix: + data := req.data.(fixData) + bar, priority := data.bar, data.priority + if bar.index < 0 { + break + } + bar.priority = priority + heap.Fix(&bHeap, bar.index) + case h_state: + ch := req.data.(chan<- bool) + ch <- sync || l != bHeap.Len() + case h_end: + ch := req.data.(chan<- interface{}) + if ch != nil { + go func() { + ch <- []*Bar(bHeap) + }() + } + close(m) + } + } +} + +func (m heapManager) sync(drop <-chan struct{}) { + m <- heapRequest{cmd: h_sync, data: drop} +} + +func (m heapManager) push(b *Bar, sync bool) { + data := pushData{b, sync} + m <- heapRequest{cmd: h_push, data: data} +} + +func (m heapManager) iter(iter chan<- *Bar, drop <-chan struct{}) { + data := iterData{iter, drop} + m <- heapRequest{cmd: h_iter, data: data} +} + +func (m heapManager) drain(iter chan<- *Bar, drop <-chan struct{}) { + data := iterData{iter, drop} + m <- heapRequest{cmd: h_drain, data: data} +} + +func (m heapManager) fix(b *Bar, priority int) { + data := fixData{b, priority} + m <- heapRequest{cmd: h_fix, data: data} +} + +func (m heapManager) state(ch chan<- bool) { + m <- heapRequest{cmd: h_state, data: ch} +} + +func (m heapManager) end(ch chan<- interface{}) { + m <- heapRequest{cmd: h_end, data: ch} +} + +func syncWidth(matrix map[int][]chan int, drop <-chan struct{}) { + for _, column := range matrix { + go maxWidthDistributor(column, drop) + } +} + +func maxWidthDistributor(column []chan int, drop <-chan struct{}) { + var maxWidth int + for _, ch := range column { + select { + case w := <-ch: + if w > maxWidth { + maxWidth = w + } + case <-drop: + return + } + } + for _, ch := range column { + ch <- maxWidth + } +} diff --git a/vendor/github.com/vbauerster/mpb/v7/internal/percentage.go b/vendor/github.com/vbauerster/mpb/v8/internal/percentage.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v7/internal/percentage.go rename to vendor/github.com/vbauerster/mpb/v8/internal/percentage.go diff --git a/vendor/github.com/vbauerster/mpb/v7/internal/width.go b/vendor/github.com/vbauerster/mpb/v8/internal/width.go similarity index 100% rename from vendor/github.com/vbauerster/mpb/v7/internal/width.go rename to vendor/github.com/vbauerster/mpb/v8/internal/width.go diff --git a/vendor/github.com/vbauerster/mpb/v7/priority_queue.go b/vendor/github.com/vbauerster/mpb/v8/priority_queue.go similarity index 84% rename from vendor/github.com/vbauerster/mpb/v7/priority_queue.go rename to vendor/github.com/vbauerster/mpb/v8/priority_queue.go index 152482e9a..f4091f690 100644 --- a/vendor/github.com/vbauerster/mpb/v7/priority_queue.go +++ b/vendor/github.com/vbauerster/mpb/v8/priority_queue.go @@ -1,12 +1,15 @@ package mpb -// A priorityQueue implements heap.Interface +import "container/heap" + +var _ heap.Interface = (*priorityQueue)(nil) + type priorityQueue []*Bar func (pq priorityQueue) Len() int { return len(pq) } func (pq priorityQueue) Less(i, j int) bool { - // less priority pops first + // greater priority pops first return 
pq[i].priority > pq[j].priority } diff --git a/vendor/github.com/vbauerster/mpb/v8/progress.go b/vendor/github.com/vbauerster/mpb/v8/progress.go new file mode 100644 index 000000000..9bb557ee4 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v8/progress.go @@ -0,0 +1,451 @@ +package mpb + +import ( + "bytes" + "context" + "fmt" + "io" + "math" + "os" + "sync" + "time" + + "github.com/vbauerster/mpb/v8/cwriter" + "github.com/vbauerster/mpb/v8/decor" +) + +const ( + defaultRefreshRate = 150 * time.Millisecond +) + +// DoneError represents an error when `*mpb.Progress` is done but its functionality is requested. +var DoneError = fmt.Errorf("%T instance can't be reused after it's done!", (*Progress)(nil)) + +// Progress represents a container that renders one or more progress bars. +type Progress struct { + uwg *sync.WaitGroup + pwg, bwg sync.WaitGroup + operateState chan func(*pState) + interceptIO chan func(io.Writer) + done <-chan struct{} + cancel func() +} + +// pState holds bars in its priorityQueue, it gets passed to (*Progress).serve monitor goroutine. +type pState struct { + ctx context.Context + hm heapManager + dropS, dropD chan struct{} + renderReq chan time.Time + idCount int + popPriority int + + // following are provided/overrided by user + refreshRate time.Duration + reqWidth int + popCompleted bool + autoRefresh bool + delayRC <-chan struct{} + manualRC <-chan interface{} + shutdownNotifier chan<- interface{} + queueBars map[*Bar]*Bar + output io.Writer + debugOut io.Writer + uwg *sync.WaitGroup +} + +// New creates new Progress container instance. It's not possible to +// reuse instance after (*Progress).Wait method has been called. +func New(options ...ContainerOption) *Progress { + return NewWithContext(context.Background(), options...) +} + +// NewWithContext creates new Progress container instance with provided +// context. It's not possible to reuse instance after (*Progress).Wait +// method has been called. +func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress { + if ctx == nil { + ctx = context.Background() + } + ctx, cancel := context.WithCancel(ctx) + s := &pState{ + ctx: ctx, + hm: make(heapManager), + dropS: make(chan struct{}), + dropD: make(chan struct{}), + renderReq: make(chan time.Time), + refreshRate: defaultRefreshRate, + popPriority: math.MinInt32, + queueBars: make(map[*Bar]*Bar), + output: os.Stdout, + debugOut: io.Discard, + } + + for _, opt := range options { + if opt != nil { + opt(s) + } + } + + p := &Progress{ + uwg: s.uwg, + operateState: make(chan func(*pState)), + interceptIO: make(chan func(io.Writer)), + cancel: cancel, + } + + cw := cwriter.New(s.output) + if s.manualRC != nil { + done := make(chan struct{}) + p.done = done + s.autoRefresh = false + go s.manualRefreshListener(done) + } else if cw.IsTerminal() || s.autoRefresh { + done := make(chan struct{}) + p.done = done + s.autoRefresh = true + go s.autoRefreshListener(done) + } else { + p.done = ctx.Done() + s.autoRefresh = false + } + + p.pwg.Add(1) + go p.serve(s, cw) + go s.hm.run() + return p +} + +// AddBar creates a bar with default bar filler. +func (p *Progress) AddBar(total int64, options ...BarOption) *Bar { + return p.New(total, BarStyle(), options...) +} + +// AddSpinner creates a bar with default spinner filler. +func (p *Progress) AddSpinner(total int64, options ...BarOption) *Bar { + return p.New(total, SpinnerStyle(), options...) +} + +// New creates a bar by calling `Build` method on provided `BarFillerBuilder`. 
+func (p *Progress) New(total int64, builder BarFillerBuilder, options ...BarOption) *Bar { + return p.MustAdd(total, builder.Build(), options...) +} + +// MustAdd creates a bar which renders itself by provided BarFiller. +// If `total <= 0` triggering complete event by increment methods is +// disabled. Panics if *Progress instance is done, i.e. called after +// (*Progress).Wait(). +func (p *Progress) MustAdd(total int64, filler BarFiller, options ...BarOption) *Bar { + bar, err := p.Add(total, filler, options...) + if err != nil { + panic(err) + } + return bar +} + +// Add creates a bar which renders itself by provided BarFiller. +// If `total <= 0` triggering complete event by increment methods +// is disabled. If *Progress instance is done, i.e. called after +// (*Progress).Wait(), return error == DoneError. +func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) (*Bar, error) { + if filler == nil { + filler = NopStyle().Build() + } + type result struct { + bar *Bar + bs *bState + } + ch := make(chan result) + select { + case p.operateState <- func(ps *pState) { + bs := ps.makeBarState(total, filler, options...) + bar := newBar(ps.ctx, p, bs) + if bs.waitBar != nil { + ps.queueBars[bs.waitBar] = bar + } else { + ps.hm.push(bar, true) + } + ps.idCount++ + ch <- result{bar, bs} + }: + res := <-ch + bar, bs := res.bar, res.bs + bar.TraverseDecorators(func(d decor.Decorator) { + if d, ok := d.(decor.AverageDecorator); ok { + bs.averageDecorators = append(bs.averageDecorators, d) + } + if d, ok := d.(decor.EwmaDecorator); ok { + bs.ewmaDecorators = append(bs.ewmaDecorators, d) + } + if d, ok := d.(decor.ShutdownListener); ok { + bs.shutdownListeners = append(bs.shutdownListeners, d) + } + }) + return bar, nil + case <-p.done: + return nil, DoneError + } +} + +func (p *Progress) traverseBars(cb func(b *Bar) bool) { + iter, drop := make(chan *Bar), make(chan struct{}) + select { + case p.operateState <- func(s *pState) { s.hm.iter(iter, drop) }: + for b := range iter { + if cb(b) { + close(drop) + break + } + } + case <-p.done: + } +} + +// UpdateBarPriority same as *Bar.SetPriority(int). +func (p *Progress) UpdateBarPriority(b *Bar, priority int) { + select { + case p.operateState <- func(s *pState) { s.hm.fix(b, priority) }: + case <-p.done: + } +} + +// Write is implementation of io.Writer. +// Writing to `*mpb.Progress` will print lines above a running bar. +// Writes aren't flushed immediately, but at next refresh cycle. +// If Write is called after `*mpb.Progress` is done, `mpb.DoneError` +// is returned. +func (p *Progress) Write(b []byte) (int, error) { + type result struct { + n int + err error + } + ch := make(chan result) + select { + case p.interceptIO <- func(w io.Writer) { + n, err := w.Write(b) + ch <- result{n, err} + }: + res := <-ch + return res.n, res.err + case <-p.done: + return 0, DoneError + } +} + +// Wait waits for all bars to complete and finally shutdowns container. After +// this method has been called, there is no way to reuse (*Progress) instance. +func (p *Progress) Wait() { + // wait for user wg, if any + if p.uwg != nil { + p.uwg.Wait() + } + + p.bwg.Wait() + p.Shutdown() +} + +// Shutdown cancels any running bar immediately and then shutdowns (*Progress) +// instance. Normally this method shouldn't be called unless you know what you +// are doing. Proper way to shutdown is to call (*Progress).Wait() instead. 
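
The Write implementation above is what lets callers print log lines above running bars; writes are buffered and rendered on the next refresh cycle. A hedged usage sketch follows (the messages and total are made up):

package main

import (
	"fmt"

	"github.com/vbauerster/mpb/v8"
)

func main() {
	p := mpb.New()
	bar := p.AddBar(3)
	for i := 1; i <= 3; i++ {
		// Rendered above the bar at the next refresh; Write returns
		// DoneError if the container has already shut down.
		fmt.Fprintf(p, "finished step %d\n", i)
		bar.Increment()
	}
	p.Wait()
}
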
+func (p *Progress) Shutdown() { + p.cancel() + p.pwg.Wait() +} + +func (p *Progress) serve(s *pState, cw *cwriter.Writer) { + defer p.pwg.Done() + render := func() error { return s.render(cw) } + var err error + + for { + select { + case op := <-p.operateState: + op(s) + case fn := <-p.interceptIO: + fn(cw) + case <-s.renderReq: + e := render() + if e != nil { + p.cancel() // cancel all bars + render = func() error { return nil } + err = e + } + case <-p.done: + update := make(chan bool) + for s.autoRefresh && err == nil { + s.hm.state(update) + if <-update { + err = render() + } else { + break + } + } + if err != nil { + _, _ = fmt.Fprintln(s.debugOut, err.Error()) + } + s.hm.end(s.shutdownNotifier) + return + } + } +} + +func (s pState) autoRefreshListener(done chan struct{}) { + if s.delayRC != nil { + <-s.delayRC + } + ticker := time.NewTicker(s.refreshRate) + defer ticker.Stop() + for { + select { + case t := <-ticker.C: + s.renderReq <- t + case <-s.ctx.Done(): + close(done) + return + } + } +} + +func (s pState) manualRefreshListener(done chan struct{}) { + for { + select { + case x := <-s.manualRC: + if t, ok := x.(time.Time); ok { + s.renderReq <- t + } else { + s.renderReq <- time.Now() + } + case <-s.ctx.Done(): + close(done) + return + } + } +} + +func (s *pState) render(cw *cwriter.Writer) (err error) { + s.hm.sync(s.dropS) + iter := make(chan *Bar) + go s.hm.iter(iter, s.dropS) + + var width, height int + if cw.IsTerminal() { + width, height, err = cw.GetTermSize() + if err != nil { + close(s.dropS) + return err + } + } else { + if s.reqWidth > 0 { + width = s.reqWidth + } else { + width = 100 + } + height = 100 + } + + for b := range iter { + go b.render(width) + } + + return s.flush(cw, height) +} + +func (s *pState) flush(cw *cwriter.Writer, height int) error { + wg := new(sync.WaitGroup) + defer wg.Wait() // waiting for all s.hm.push to complete + + var popCount int + var rows []io.Reader + + iter := make(chan *Bar) + s.hm.drain(iter, s.dropD) + + for b := range iter { + frame := <-b.frameCh + if frame.err != nil { + close(s.dropD) + b.cancel() + return frame.err // b.frameCh is buffered it's ok to return here + } + var usedRows int + for i := len(frame.rows) - 1; i >= 0; i-- { + if row := frame.rows[i]; len(rows) < height { + rows = append(rows, row) + usedRows++ + } else { + _, _ = io.Copy(io.Discard, row) + } + } + if frame.shutdown != 0 && !frame.done { + if qb, ok := s.queueBars[b]; ok { + b.cancel() + delete(s.queueBars, b) + qb.priority = b.priority + wg.Add(1) + go func(b *Bar) { + s.hm.push(b, true) + wg.Done() + }(qb) + continue + } + if s.popCompleted && !frame.noPop { + switch frame.shutdown { + case 1: + b.priority = s.popPriority + s.popPriority++ + default: + b.cancel() + popCount += usedRows + continue + } + } else if frame.rmOnComplete { + b.cancel() + continue + } else { + b.cancel() + } + } + wg.Add(1) + go func(b *Bar) { + s.hm.push(b, false) + wg.Done() + }(b) + } + + for i := len(rows) - 1; i >= 0; i-- { + _, err := cw.ReadFrom(rows[i]) + if err != nil { + return err + } + } + + return cw.Flush(len(rows) - popCount) +} + +func (s pState) makeBarState(total int64, filler BarFiller, options ...BarOption) *bState { + bs := &bState{ + id: s.idCount, + priority: s.idCount, + reqWidth: s.reqWidth, + total: total, + filler: filler, + renderReq: s.renderReq, + autoRefresh: s.autoRefresh, + } + + if total > 0 { + bs.triggerComplete = true + } + + for _, opt := range options { + if opt != nil { + opt(bs) + } + } + + for i := 0; i < len(bs.buffers); i++ { + 
bs.buffers[i] = bytes.NewBuffer(make([]byte, 0, 512)) + } + + return bs +} diff --git a/vendor/github.com/vbauerster/mpb/v8/proxyreader.go b/vendor/github.com/vbauerster/mpb/v8/proxyreader.go new file mode 100644 index 000000000..b0e7720b9 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v8/proxyreader.go @@ -0,0 +1,96 @@ +package mpb + +import ( + "io" + "time" +) + +type proxyReader struct { + io.ReadCloser + bar *Bar +} + +func (x proxyReader) Read(p []byte) (int, error) { + n, err := x.ReadCloser.Read(p) + x.bar.IncrBy(n) + return n, err +} + +type proxyWriterTo struct { + proxyReader +} + +func (x proxyWriterTo) WriteTo(w io.Writer) (int64, error) { + n, err := x.ReadCloser.(io.WriterTo).WriteTo(w) + x.bar.IncrInt64(n) + return n, err +} + +type ewmaProxyReader struct { + io.ReadCloser + bar *Bar +} + +func (x ewmaProxyReader) Read(p []byte) (int, error) { + start := time.Now() + n, err := x.ReadCloser.Read(p) + x.bar.EwmaIncrBy(n, time.Since(start)) + return n, err +} + +type ewmaProxyWriterTo struct { + ewmaProxyReader +} + +func (x ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) { + start := time.Now() + n, err := x.ReadCloser.(io.WriterTo).WriteTo(w) + x.bar.EwmaIncrInt64(n, time.Since(start)) + return n, err +} + +func newProxyReader(r io.Reader, b *Bar, hasEwma bool) io.ReadCloser { + rc := toReadCloser(r) + if hasEwma { + epr := ewmaProxyReader{rc, b} + if _, ok := r.(io.WriterTo); ok { + return ewmaProxyWriterTo{epr} + } + return epr + } + pr := proxyReader{rc, b} + if _, ok := r.(io.WriterTo); ok { + return proxyWriterTo{pr} + } + return pr +} + +func toReadCloser(r io.Reader) io.ReadCloser { + if rc, ok := r.(io.ReadCloser); ok { + return rc + } + return toNopReadCloser(r) +} + +func toNopReadCloser(r io.Reader) io.ReadCloser { + if _, ok := r.(io.WriterTo); ok { + return nopReadCloserWriterTo{r} + } + return nopReadCloser{r} +} + +type nopReadCloser struct { + io.Reader +} + +func (nopReadCloser) Close() error { return nil } + +type nopReadCloserWriterTo struct { + io.Reader +} + +func (nopReadCloserWriterTo) Close() error { return nil } + +func (c nopReadCloserWriterTo) WriteTo(w io.Writer) (int64, error) { + return c.Reader.(io.WriterTo).WriteTo(w) +} diff --git a/vendor/github.com/vbauerster/mpb/v8/proxywriter.go b/vendor/github.com/vbauerster/mpb/v8/proxywriter.go new file mode 100644 index 000000000..f260dafa8 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v8/proxywriter.go @@ -0,0 +1,96 @@ +package mpb + +import ( + "io" + "time" +) + +type proxyWriter struct { + io.WriteCloser + bar *Bar +} + +func (x proxyWriter) Write(p []byte) (int, error) { + n, err := x.WriteCloser.Write(p) + x.bar.IncrBy(n) + return n, err +} + +type proxyReaderFrom struct { + proxyWriter +} + +func (x proxyReaderFrom) ReadFrom(r io.Reader) (int64, error) { + n, err := x.WriteCloser.(io.ReaderFrom).ReadFrom(r) + x.bar.IncrInt64(n) + return n, err +} + +type ewmaProxyWriter struct { + io.WriteCloser + bar *Bar +} + +func (x ewmaProxyWriter) Write(p []byte) (int, error) { + start := time.Now() + n, err := x.WriteCloser.Write(p) + x.bar.EwmaIncrBy(n, time.Since(start)) + return n, err +} + +type ewmaProxyReaderFrom struct { + ewmaProxyWriter +} + +func (x ewmaProxyReaderFrom) ReadFrom(r io.Reader) (int64, error) { + start := time.Now() + n, err := x.WriteCloser.(io.ReaderFrom).ReadFrom(r) + x.bar.EwmaIncrInt64(n, time.Since(start)) + return n, err +} + +func newProxyWriter(w io.Writer, b *Bar, hasEwma bool) io.WriteCloser { + wc := toWriteCloser(w) + if hasEwma { + epw := 
ewmaProxyWriter{wc, b} + if _, ok := w.(io.ReaderFrom); ok { + return ewmaProxyReaderFrom{epw} + } + return epw + } + pw := proxyWriter{wc, b} + if _, ok := w.(io.ReaderFrom); ok { + return proxyReaderFrom{pw} + } + return pw +} + +func toWriteCloser(w io.Writer) io.WriteCloser { + if wc, ok := w.(io.WriteCloser); ok { + return wc + } + return toNopWriteCloser(w) +} + +func toNopWriteCloser(w io.Writer) io.WriteCloser { + if _, ok := w.(io.ReaderFrom); ok { + return nopWriteCloserReaderFrom{w} + } + return nopWriteCloser{w} +} + +type nopWriteCloser struct { + io.Writer +} + +func (nopWriteCloser) Close() error { return nil } + +type nopWriteCloserReaderFrom struct { + io.Writer +} + +func (nopWriteCloserReaderFrom) Close() error { return nil } + +func (c nopWriteCloserReaderFrom) ReadFrom(r io.Reader) (int64, error) { + return c.Writer.(io.ReaderFrom).ReadFrom(r) +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go index 54aa617cf..789d2b982 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go @@ -323,7 +323,7 @@ func (v Value) String() string { if !ok { return "" } - return fmt.Sprintf(`{"$timestamp":{"t":"%s","i":"%s"}}`, strconv.FormatUint(uint64(t), 10), strconv.FormatUint(uint64(i), 10)) + return fmt.Sprintf(`{"$timestamp":{"t":%v,"i":%v}}`, t, i) case bsontype.Int64: i64, ok := v.Int64OK() if !ok { diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/exp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/exp/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 000000000..2c033dff4 --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go new file mode 100644 index 000000000..ecc0dabb7 --- /dev/null +++ b/vendor/golang.org/x/exp/maps/maps.go @@ -0,0 +1,94 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package maps defines various functions useful with maps of any type. +package maps + +// Keys returns the keys of the map m. +// The keys will be in an indeterminate order. +func Keys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} + +// Values returns the values of the map m. +// The values will be in an indeterminate order. +func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} + +// Equal reports whether two maps contain the same key/value pairs. +// Values are compared using ==. +func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || v1 != v2 { + return false + } + } + return true +} + +// EqualFunc is like Equal, but compares values using eq. +// Keys are still compared with ==. +func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || !eq(v1, v2) { + return false + } + } + return true +} + +// Clear removes all entries from m, leaving it empty. +func Clear[M ~map[K]V, K comparable, V any](m M) { + for k := range m { + delete(m, k) + } +} + +// Clone returns a copy of m. This is a shallow clone: +// the new keys and values are set using ordinary assignment. +func Clone[M ~map[K]V, K comparable, V any](m M) M { + // Preserve nil in case it matters. + if m == nil { + return nil + } + r := make(M, len(m)) + for k, v := range m { + r[k] = v + } + return r +} + +// Copy copies all key/value pairs in src adding them to dst. +// When a key in src is already present in dst, +// the value in dst will be overwritten by the value associated +// with the key in src. +func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { + for k, v := range src { + dst[k] = v + } +} + +// DeleteFunc deletes any key/value pairs from m for which del returns true. +func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { + for k, v := range m { + if del(k, v) { + delete(m, k) + } + } +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 000000000..cff0cd49e --- /dev/null +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,258 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. +// Unless otherwise specified, these functions all apply to the elements +// of a slice at index 0 <= i < len(s). +// +// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a +// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), +// or the sorting may fail to sort correctly. A common case is when sorting slices of +// floating-point numbers containing NaN values. +package slices + +import "golang.org/x/exp/constraints" + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. 
+// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. +func Equal[E comparable](s1, s2 []E) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using a comparison +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2. +// The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +// Comparisons involving floating point NaNs are ignored. +func Compare[E constraints.Ordered](s1, s2 []E) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// CompareFunc is like Compare but uses a comparison function +// on each pair of elements. The elements are compared in increasing +// index order, and the comparisons stop after the first time cmp +// returns non-zero. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). +func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[E comparable](s []E, v E) int { + for i, vs := range s { + if v == vs { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[E any](s []E, f func(E) bool) int { + for i, v := range s { + if f(v) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[E comparable](s []E, v E) bool { + return Index(s, v) >= 0 +} + +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[E any](s []E, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// In the returned slice r, r[i] == v[0]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). 
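
The doc comments above pin down the observable semantics of Equal, Compare, Index, Contains, and Insert; a short sketch of what callers can expect (the concrete values are arbitrary):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 2, 5}
	fmt.Println(slices.Index(s, 5))             // 2
	fmt.Println(slices.Contains(s, 3))          // false
	fmt.Println(slices.Compare(s, []int{1, 2})) // 1 (equal prefix, s is longer)
	s = slices.Insert(s, 2, 3, 4)               // r[i] == v[0], per the doc above
	fmt.Println(s)                              // [1 2 3 4 5]
	fmt.Println(slices.Equal(s, []int{1, 2, 3, 4, 5})) // true
}
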
+func Insert[S ~[]E, E any](s S, i int, v ...E) S { + tot := len(s) + len(v) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[i:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[i:]) + return s2 +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if s[i:j] is not a valid slice of s. +// Delete modifies the contents of the slice s; it does not create a new slice. +// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those +// elements contain pointers you might consider zeroing those elements so that +// objects they reference can be garbage collected. +func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j] // bounds check + + return append(s[:i], s[j:]...) +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. +func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[j:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s; it does not create a new slice. +// When Compact discards m elements in total, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage collected. +func Compact[S ~[]E, E comparable](s S) S { + if len(s) < 2 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if v != last { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// CompactFunc is like Compact but uses a comparison function. +func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) < 2 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if !eq(v, last) { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. If n is negative or too large to +// allocate the memory, Grow panics. +func Grow[S ~[]E, E any](s S, n int) S { + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. 
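
Several of the functions above (Delete, Compact, Grow, Clip) deliberately mutate or reuse the input's backing array rather than allocating. A sketch of those aliasing semantics, with arbitrary values:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 1, 2, 3, 3, 3, 4}
	s = slices.Compact(s)          // dedupes runs in place -> [1 2 3 4]
	fmt.Println(s, len(s), cap(s)) // [1 2 3 4] 4 7: capacity is untouched

	s = slices.Delete(s, 1, 3) // removes s[1:3] in place -> [1 4]
	fmt.Println(s)

	s = slices.Grow(s, 8)           // room for 8 more appends, no reallocation after
	fmt.Println(cap(s)-len(s) >= 8) // true
	s = slices.Clip(s)              // s[:len(s):len(s)]
	fmt.Println(cap(s) == len(s))   // true
}
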
+func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 000000000..f14f40da7 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,126 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. +// Sort may fail to sort correctly when sorting slices of floating-point +// numbers containing Not-a-number (NaN) values. +// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) +// instead if the input may contain NaNs. +func Sort[E constraints.Ordered](x []E) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the less function. +// This sort is not guaranteed to be stable. +// +// SortFunc requires that less is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +func SortFunc[E any](x []E, less func(a, b E) bool) { + n := len(x) + pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) +} + +// SortStableFunc sorts the slice x while keeping the original order of equal +// elements, using less to compare elements. +func SortStableFunc[E any](x []E, less func(a, b E) bool) { + stableLessFunc(x, len(x), less) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[E constraints.Ordered](x []E) bool { + for i := len(x) - 1; i > 0; i-- { + if x[i] < x[i-1] { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with less as the +// comparison function. +func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { + for i := len(x) - 1; i > 0; i-- { + if less(x[i], x[i-1]) { + return false + } + } + return true +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. The slice must be sorted in increasing order. +func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if x[h] < target { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } + } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && x[i] == target +} + +// BinarySearchFunc works like BinarySearch, but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" is +// defined by cmp. cmp(a, b) is expected to return an integer comparing the two +// parameters: 0 if a == b, a negative number if a < b and a positive number if +// a > b. +func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . + // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. 
+ i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 + } else { + j = h // preserves cmp(x[j], target) >= 0 + } + } + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. + return i, i < n && cmp(x[i], target) == 0 +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go new file mode 100644 index 000000000..2a632476c --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortfunc.go @@ -0,0 +1,479 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortLessFunc sorts data[a:b] using insertion sort. +func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + for i := a + 1; i < b; i++ { + for j := i; j > a && less(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownLessFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. +func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && less(data[first+child], data[first+child+1]) { + child++ + } + if !less(data[first+root], data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownLessFunc(data, i, hi, first, less) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownLessFunc(data, lo, i, first, less) + } +} + +// pdqsortLessFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortLessFunc(data, a, b, less) + return + } + + // Fall back to heapsort if too many bad choices were made. 
+ if limit == 0 { + heapSortLessFunc(data, a, b, less) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsLessFunc(data, a, b, less) + limit-- + } + + pivot, hint := choosePivotLessFunc(data, a, b, less) + if hint == decreasingHint { + reverseRangeLessFunc(data, a, b, less) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortLessFunc(data, a, b, less) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !less(data[a-1], data[pivot]) { + mid := partitionEqualLessFunc(data, a, b, pivot, less) + a = mid + continue + } + + mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortLessFunc(data, a, mid, limit, less) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortLessFunc(data, mid+1, b, limit, less) + b = mid + } + } +} + +// partitionLessFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot. +// On return, data[newpivot] = p +func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !less(data[a], data[i]) { + i++ + } + for i <= j && less(data[a], data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !less(data[i], data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotLessFunc chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. 
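
None of these pdqsort helpers are exported; they are reached through the Sort and SortFunc entry points in sort.go above. A small sketch of SortFunc (the task type and its fields are hypothetical), where less must be a strict weak ordering:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

// task is a hypothetical element type, for illustration only.
type task struct {
	name     string
	priority int
}

func main() {
	ts := []task{{"b", 2}, {"a", 1}, {"c", 2}}
	// Not guaranteed stable: ties between "b" and "c" may land either way.
	slices.SortFunc(ts, func(x, y task) bool { return x.priority < y.priority })
	fmt.Println(ts[0].name) // "a"
}
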
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentLessFunc(data, i, &swaps, less) + j = medianAdjacentLessFunc(data, j, &swaps, less) + k = medianAdjacentLessFunc(data, k, &swaps, less) + } + // Find the median among i, j, k and stores it into j. + j = medianLessFunc(data, i, j, k, &swaps, less) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { + if less(data[b], data[a]) { + *swaps++ + return b, a + } + return a, b +} + +// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { + a, b = order2LessFunc(data, a, b, swaps, less) + b, c = order2LessFunc(data, b, c, swaps, less) + a, b = order2LessFunc(data, a, b, swaps, less) + return b +} + +// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { + return medianLessFunc(data, a-1, a, a+1, swaps, less) +} + +func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortLessFunc(data, a, b, less) + a = b + b += blockSize + } + insertionSortLessFunc(data, a, n, less) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeLessFunc(data, a, a+blockSize, b, less) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeLessFunc(data, a, m, n, less) + } + blockSize *= 2 + } +} + +// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. 
+// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if less(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !less(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !less(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateLessFunc(data, start, m, end, less) + } + if a < start && start < mid { + symMergeLessFunc(data, a, start, mid, less) + } + if mid < end && end < b { + symMergeLessFunc(data, mid, end, b, less) + } +} + +// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeLessFunc(data, m-i, m, j, less) + i -= j + } else { + swapRangeLessFunc(data, m-i, m+j-i, i, less) + j -= i + } + } + // i == j + swapRangeLessFunc(data, m-i, m, i, less) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 000000000..efaa1c8b7 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,481 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. +func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j] < data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
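
The *Ordered variants that follow are the generated twins of the *LessFunc code above, specialized to constraints.Ordered and driven by the exported Sort; together with IsSorted and BinarySearch from sort.go they give the usual workflow, sketched here with arbitrary data:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []string{"pear", "apple", "plum"}
	slices.Sort(xs)                  // dispatches to pdqsortOrdered below
	fmt.Println(slices.IsSorted(xs)) // true
	i, ok := slices.BinarySearch(xs, "plum")
	fmt.Println(i, ok) // 2 true
}
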
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (data[first+child] < data[first+child+1]) { + child++ + } + if !(data[first+root] < data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownOrdered(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownOrdered(data, lo, i, first) + } +} + +// pdqsortOrdered sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortOrdered(data, a, b) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortOrdered(data, a, b) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsOrdered(data, a, b) + limit-- + } + + pivot, hint := choosePivotOrdered(data, a, b) + if hint == decreasingHint { + reverseRangeOrdered(data, a, b) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortOrdered(data, a, b) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !(data[a-1] < data[pivot]) { + mid := partitionEqualOrdered(data, a, b, pivot) + a = mid + continue + } + + mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortOrdered(data, a, mid, limit) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortOrdered(data, mid+1, b, limit) + b = mid + } + } +} + +// partitionOrdered does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot. +// On return, data[newpivot] = p +func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && (data[i] < data[a]) { + i++ + } + for i <= j && !(data[j] < data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && (data[i] < data[a]) { + i++ + } + for i <= j && !(data[j] < data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !(data[a] < data[i]) { + i++ + } + for i <= j && (data[a] < data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !(data[i] < data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !(data[j] < data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !(data[j] < data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsOrdered scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotOrdered chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. +func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. 
+ i = medianAdjacentOrdered(data, i, &swaps) + j = medianAdjacentOrdered(data, j, &swaps) + k = medianAdjacentOrdered(data, k, &swaps) + } + // Find the median among i, j, k and stores it into j. + j = medianOrdered(data, i, j, k, &swaps) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { + if data[b] < data[a] { + *swaps++ + return b, a + } + return a, b +} + +// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { + a, b = order2Ordered(data, a, b, swaps) + b, c = order2Ordered(data, b, c, swaps) + a, b = order2Ordered(data, a, b, swaps) + return b +} + +// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { + return medianOrdered(data, a-1, a, a+1, swaps) +} + +func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableOrdered[E constraints.Ordered](data []E, n int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortOrdered(data, a, b) + a = b + b += blockSize + } + insertionSortOrdered(data, a, n) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeOrdered(data, a, a+blockSize, b) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeOrdered(data, a, m, n) + } + blockSize *= 2 + } +} + +// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. 
+ i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if data[h] < data[a] { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(data[m] < data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(data[p-c] < data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 52338d004..8e001134d 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -20,6 +20,19 @@ How to get your contributions merged smoothly and quickly. both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. +- For speculative changes, consider opening an issue and discussing it first. If + you are suggesting a behavioral or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). + +- If you are searching for features to work on, issues labeled [Status: Help + Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) + is a great place to start. These issues are well-documented and usually can be + resolved with a single pull request. + +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file + and update the year. + - The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a @@ -32,14 +45,18 @@ How to get your contributions merged smoothly and quickly. - Provide a good **PR description** as a record of **what** change is being made and **why** it was made. 
Link to a github issue if it exists. -- Don't fix code style and formatting unless you are already changing that line - to address an issue. PRs with irrelevant changes won't be merged. If you do - want to fix formatting or style, do that in a separate PR. +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the + benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments - that you'll need to address before merging. We expect you to be reasonably - responsive to those comments, otherwise the PR will be closed after 2-3 weeks - of inactivity. + that you'll need to address before merging. We'll mark it as `Status: Requires + Reporter Clarification` if we expect you to respond to these comments in a + timely manner. If the PR remains inactive for 6 days, it will be marked as + `stale` and automatically close 7 days after that if we don't hear back from + you. - Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 66d141fce..8cd89dab9 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.28.1 -// protoc v3.14.0 +// protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index d607d4e9e..b9cc05507 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -146,8 +146,18 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) - for _, opt := range extraDialOptions { - opt.apply(&cc.dopts) + disableGlobalOpts := false + for _, opt := range opts { + if _, ok := opt.(*disableGlobalDialOptions); ok { + disableGlobalOpts = true + break + } + } + + if !disableGlobalOpts { + for _, opt := range globalDialOptions { + opt.apply(&cc.dopts) + } } for _, opt := range opts { @@ -1103,7 +1113,11 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) return } ac.state = s - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + if lastErr == nil { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + } else { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) + } ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } @@ -1527,6 +1541,9 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { // referenced by users. var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") +// getResolver finds the scheme in the cc's resolvers or the global registry. 
+// scheme should always be lowercase (typically by virtue of url.Parse() +// performing proper RFC3986 behavior). func (cc *ClientConn) getResolver(scheme string) resolver.Builder { for _, rb := range cc.dopts.resolvers { if scheme == rb.Scheme() { diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go index 0b206a578..934fac2b0 100644 --- a/vendor/google.golang.org/grpc/codes/code_string.go +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -18,7 +18,15 @@ package codes -import "strconv" +import ( + "strconv" + + "google.golang.org/grpc/internal" +) + +func init() { + internal.CanonicalString = canonicalString +} func (c Code) String() string { switch c { @@ -60,3 +68,44 @@ func (c Code) String() string { return "Code(" + strconv.FormatInt(int64(c), 10) + ")" } } + +func canonicalString(c Code) string { + switch c { + case OK: + return "OK" + case Canceled: + return "CANCELLED" + case Unknown: + return "UNKNOWN" + case InvalidArgument: + return "INVALID_ARGUMENT" + case DeadlineExceeded: + return "DEADLINE_EXCEEDED" + case NotFound: + return "NOT_FOUND" + case AlreadyExists: + return "ALREADY_EXISTS" + case PermissionDenied: + return "PERMISSION_DENIED" + case ResourceExhausted: + return "RESOURCE_EXHAUSTED" + case FailedPrecondition: + return "FAILED_PRECONDITION" + case Aborted: + return "ABORTED" + case OutOfRange: + return "OUT_OF_RANGE" + case Unimplemented: + return "UNIMPLEMENTED" + case Internal: + return "INTERNAL" + case Unavailable: + return "UNAVAILABLE" + case DataLoss: + return "DATA_LOSS" + case Unauthenticated: + return "UNAUTHENTICATED" + default: + return "CODE(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 4866da101..e9d6852fd 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -38,13 +38,14 @@ import ( func init() { internal.AddGlobalDialOptions = func(opt ...DialOption) { - extraDialOptions = append(extraDialOptions, opt...) + globalDialOptions = append(globalDialOptions, opt...) } internal.ClearGlobalDialOptions = func() { - extraDialOptions = nil + globalDialOptions = nil } internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption + internal.DisableGlobalDialOptions = newDisableGlobalDialOptions } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -83,7 +84,7 @@ type DialOption interface { apply(*dialOptions) } -var extraDialOptions []DialOption +var globalDialOptions []DialOption // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. @@ -96,6 +97,16 @@ type EmptyDialOption struct{} func (EmptyDialOption) apply(*dialOptions) {} +type disableGlobalDialOptions struct{} + +func (disableGlobalDialOptions) apply(*dialOptions) {} + +// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn +// from applying the global DialOptions (set via AddGlobalDialOptions). +func newDisableGlobalDialOptions() DialOption { + return &disableGlobalDialOptions{} +} + // funcDialOption wraps a function that modifies dialOptions into an // implementation of the DialOption interface. 
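
An aside on the mechanics above: the opt-out marker added by this patch is just a sentinel DialOption that DialContext detects by type assertion before applying the globals. A self-contained sketch of the pattern, using local stand-ins rather than the real grpc types:

```go
package main

import "fmt"

type dialOptions struct{ userAgent string }

// DialOption mirrors the grpc option interface shape.
type DialOption interface{ apply(*dialOptions) }

type funcOpt func(*dialOptions)

func (f funcOpt) apply(o *dialOptions) { f(o) }

// disableGlobals is the sentinel: applying it is a no-op, its presence is
// what matters (mirrors disableGlobalDialOptions in the patch).
type disableGlobals struct{}

func (disableGlobals) apply(*dialOptions) {}

var globalOpts = []DialOption{
	funcOpt(func(o *dialOptions) { o.userAgent = "global-agent" }),
}

func dial(opts ...DialOption) dialOptions {
	var o dialOptions
	skipGlobals := false
	for _, opt := range opts {
		if _, ok := opt.(disableGlobals); ok {
			skipGlobals = true
			break
		}
	}
	if !skipGlobals {
		for _, opt := range globalOpts {
			opt.apply(&o)
		}
	}
	for _, opt := range opts { // user options always win
		opt.apply(&o)
	}
	return o
}

func main() {
	fmt.Println(dial())                 // {global-agent}
	fmt.Println(dial(disableGlobals{})) // {}
}
```
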
type funcDialOption struct { diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 809d73cca..af03a40d9 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -28,8 +28,10 @@ import ( "google.golang.org/grpc/internal/grpcutil" ) -// Logger is the global binary logger. It can be used to get binary logger for -// each method. +var grpclogLogger = grpclog.Component("binarylog") + +// Logger specifies MethodLoggers for method names with a Log call that +// takes a context. type Logger interface { GetMethodLogger(methodName string) MethodLogger } @@ -40,8 +42,6 @@ type Logger interface { // It is used to get a MethodLogger for each individual method. var binLogger Logger -var grpclogLogger = grpclog.Component("binarylog") - // SetLogger sets the binary logger. // // Only call this at init time. diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index d71e44177..56fcf008d 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -19,6 +19,7 @@ package binarylog import ( + "context" "net" "strings" "sync/atomic" @@ -49,7 +50,7 @@ var idGen callIDGenerator // MethodLogger is the sub-logger for each method. type MethodLogger interface { - Log(LogEntryConfig) + Log(context.Context, LogEntryConfig) } // TruncatingMethodLogger is a method logger that truncates headers and messages @@ -98,7 +99,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. -func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 82af70e96..02224b42c 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -63,6 +63,9 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { // Debugf does info logging at verbose level 2. func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. if !Logger.V(2) { return } @@ -73,6 +76,15 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { return } InfoDepth(1, fmt.Sprintf(format, args...)) + +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (pl *PrefixLogger) V(l int) bool { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + return Logger.V(l) } // NewPrefixLogger creates a prefix logger with the given prefix. 
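
Aside: the signature change in method_logger.go above threads the RPC context into every Log call. A minimal standalone mirror of the new contract, using only the standard library (the real interface lives in the internal binarylog package and cannot be imported directly):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// LogEntryConfig and MethodLogger mirror the shape of the updated
// interface; with the context available, a sink can inspect deadlines,
// metadata, or trace information when it writes an entry.
type LogEntryConfig interface{}

type MethodLogger interface {
	Log(context.Context, LogEntryConfig)
}

type stdoutLogger struct{}

func (stdoutLogger) Log(ctx context.Context, c LogEntryConfig) {
	if dl, ok := ctx.Deadline(); ok {
		fmt.Printf("entry %v (deadline in %v)\n", c, time.Until(dl).Round(time.Second))
		return
	}
	fmt.Printf("entry %v (no deadline)\n", c)
}

func main() {
	var ml MethodLogger = stdoutLogger{}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	ml.Log(ctx, "client header")
}
```
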
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 0a76d9de6..836b6a3b3 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -58,6 +58,9 @@ var ( // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + // CanonicalString returns the canonical string of the code defined here: + // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + CanonicalString interface{} // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. An // xDS-enabled server invokes this method on a grpc.Server when a particular @@ -74,6 +77,10 @@ var ( // globally for newly created client channels. The priority will be: 1. // user-provided; 2. this method; 3. default values. AddGlobalDialOptions interface{} // func(opt ...DialOption) + // DisableGlobalDialOptions returns a DialOption that prevents the + // ClientConn from applying the global DialOptions (set via + // AddGlobalDialOptions). + DisableGlobalDialOptions interface{} // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. ClearGlobalDialOptions func() @@ -130,6 +137,9 @@ var ( // // TODO: Remove this function once the RBAC env var is removed. UnregisterRBACHTTPFilterForTesting func() + + // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. + ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index b2980f8ac..c82e608e0 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -76,33 +76,11 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address { return addr } -// Validate returns an error if the input md contains invalid keys or values. -// -// If the header is not a pseudo-header, the following items are checked: -// - header names must contain one or more characters from this set [0-9 a-z _ - .]. -// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. -// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. +// Validate validates every pair in md with ValidatePair. func Validate(md metadata.MD) error { for k, vals := range md { - // pseudo-header will be ignored - if k[0] == ':' { - continue - } - // check key, for i that saving a conversion if not using for range - for i := 0; i < len(k); i++ { - r := k[i] - if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' 
&& r != '-' && r != '_' {
-			return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
-		}
-	}
-	if strings.HasSuffix(k, "-bin") {
-		continue
-	}
-	// check value
-	for _, val := range vals {
-		if hasNotPrintable(val) {
-			return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
-		}
+		if err := ValidatePair(k, vals...); err != nil {
+			return err
 		}
 	}
 	return nil
@@ -118,3 +96,37 @@ func hasNotPrintable(msg string) bool {
 	}
 	return false
 }
+
+// ValidatePair validates a key-value pair with the following rules (pseudo-headers are skipped):
+//
+// - key must contain one or more characters.
+// - the characters in the key must be contained in [0-9 a-z _ - .].
+// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed.
+// - the characters in every value must be printable (in [%x20-%x7E]).
+func ValidatePair(key string, vals ...string) error {
+	// key should not be empty
+	if key == "" {
+		return fmt.Errorf("there is an empty key in the header")
+	}
+	// pseudo-header will be ignored
+	if key[0] == ':' {
+		return nil
+	}
+	// check key; indexing by byte saves a conversion compared to a for range loop
+	for i := 0; i < len(key); i++ {
+		r := key[i]
+		if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
+			return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
+		}
+	}
+	if strings.HasSuffix(key, "-bin") {
+		return nil
+	}
+	// check value
+	for _, val := range vals {
+		if hasNotPrintable(val) {
+			return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key)
+		}
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index 9097385e1..c343c23a5 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -22,6 +22,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"net"
 	"runtime"
 	"strconv"
 	"sync"
@@ -486,12 +487,13 @@ type loopyWriter struct {
 	hEnc      *hpack.Encoder // HPACK encoder.
 	bdpEst    *bdpEstimator
 	draining  bool
+	conn      net.Conn
 
 	// Side-specific handlers
 	ssGoAwayHandler func(*goAway) (bool, error)
 }
 
-func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn) *loopyWriter {
 	var buf bytes.Buffer
 	l := &loopyWriter{
 		side:      s,
@@ -504,6 +506,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato
 		hBuf:      &buf,
 		hEnc:      hpack.NewEncoder(&buf),
 		bdpEst:    bdpEst,
+		conn:      conn,
 	}
 	return l
 }
@@ -521,15 +524,27 @@ const minBatchSize = 1000
 // 2. Stream level flow control quota available.
 //
 // In each iteration of run loop, other than processing the incoming control
-// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
-// This results in writing of HTTP2 frames into an underlying write buffer.
-// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
-// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
-// if the batch size is too low to give stream goroutines a chance to fill it up.
+// frame, loopy calls processData, which processes one node from the
+// activeStreams linked-list.
This results in writing of HTTP2 frames into an +// underlying write buffer. When there's no more control frames to read from +// controlBuf, loopy flushes the write buffer. As an optimization, to increase +// the batch size for each flush, loopy yields the processor, once if the batch +// size is too low to give stream goroutines a chance to fill it up. +// +// Upon exiting, if the error causing the exit is not an I/O error, run() +// flushes and closes the underlying connection. Otherwise, the connection is +// left open to allow the I/O error to be encountered by the reader instead. func (l *loopyWriter) run() (err error) { - // Always flush the writer before exiting in case there are pending frames - // to be sent. - defer l.framer.writer.Flush() + defer func() { + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exiting with error: %v", err) + } + if !isIOError(err) { + l.framer.writer.Flush() + l.conn.Close() + } + l.cbuf.finish() + }() for { it, err := l.cbuf.get(true) if err != nil { @@ -581,11 +596,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) } -func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { // Otherwise update the quota. if w.streamID == 0 { l.sendQuota += w.increment - return nil + return } // Find the stream and update it. if str, ok := l.estdStreams[w.streamID]; ok { @@ -593,10 +608,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { str.state = active l.activeStreams.enqueue(str) - return nil + return } } - return nil } func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { @@ -604,13 +618,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { } func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { - if err := l.applySettings(s.ss); err != nil { - return err - } + l.applySettings(s.ss) return l.framer.fr.WriteSettingsAck() } -func (l *loopyWriter) registerStreamHandler(h *registerStream) error { +func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ id: h.streamID, state: empty, @@ -618,7 +630,6 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) error { wq: h.wq, } l.estdStreams[h.streamID] = str - return nil } func (l *loopyWriter) headerHandler(h *headerFrame) error { @@ -720,10 +731,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He return nil } -func (l *loopyWriter) preprocessData(df *dataFrame) error { +func (l *loopyWriter) preprocessData(df *dataFrame) { str, ok := l.estdStreams[df.streamID] if !ok { - return nil + return } // If we got data for a stream it means that // stream was originated and the headers were sent out. 
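
Aside: run()'s new exit rule depends on telling I/O errors apart from logical ones; the classification helper is added later in this patch (in the http_util.go hunk). The pattern in isolation, a marker type unwrapped via errors.As:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// ioError marks errors that came from the underlying connection. On such
// errors the writer leaves the conn open so the reader surfaces them; on
// anything else it flushes and closes (mirrors the http_util.go hunk).
type ioError struct{ error }

func (i ioError) Unwrap() error { return i.error }

func toIOError(err error) error {
	if err == nil {
		return nil
	}
	return ioError{error: err}
}

func isIOError(err error) bool {
	return errors.As(err, &ioError{})
}

func main() {
	fmt.Println(isIOError(toIOError(io.ErrClosedPipe))) // true: reader handles it
	fmt.Println(isIOError(errors.New("draining done"))) // false: writer closes conn
}
```
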
@@ -732,7 +743,6 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error { str.state = active l.activeStreams.enqueue(str) } - return nil } func (l *loopyWriter) pingHandler(p *ping) error { @@ -743,9 +753,8 @@ func (l *loopyWriter) pingHandler(p *ping) error { } -func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { o.resp <- l.sendQuota - return nil } func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { @@ -763,6 +772,7 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { } } if l.draining && len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. return errors.New("finished processing active streams while in draining mode") } return nil @@ -798,6 +808,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. return errors.New("received GOAWAY with no active streams") } } @@ -816,17 +827,10 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } -func (l *loopyWriter) closeConnectionHandler() error { - // Exit loopyWriter entirely by returning an error here. This will lead to - // the transport closing the connection, and, ultimately, transport - // closure. - return ErrConnClosing -} - func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: - return l.incomingWindowUpdateHandler(i) + l.incomingWindowUpdateHandler(i) case *outgoingWindowUpdate: return l.outgoingWindowUpdateHandler(i) case *incomingSettings: @@ -836,7 +840,7 @@ func (l *loopyWriter) handle(i interface{}) error { case *headerFrame: return l.headerHandler(i) case *registerStream: - return l.registerStreamHandler(i) + l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) case *earlyAbortStream: @@ -844,21 +848,24 @@ func (l *loopyWriter) handle(i interface{}) error { case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: - return l.preprocessData(i) + l.preprocessData(i) case *ping: return l.pingHandler(i) case *goAway: return l.goAwayHandler(i) case *outFlowControlSizeRequest: - return l.outFlowControlSizeRequestHandler(i) + l.outFlowControlSizeRequestHandler(i) case closeConnection: - return l.closeConnectionHandler() + // Just return a non-I/O error and run() will flush and close the + // connection. 
+		return ErrConnClosing
 	default:
 		return fmt.Errorf("transport: unknown control message type %T", i)
 	}
+	return nil
 }
 
-func (l *loopyWriter) applySettings(ss []http2.Setting) error {
+func (l *loopyWriter) applySettings(ss []http2.Setting) {
 	for _, s := range ss {
 		switch s.ID {
 		case http2.SettingInitialWindowSize:
@@ -877,7 +884,6 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error {
 			updateHeaderTblSize(l.hEnc, s.Val)
 		}
 	}
-	return nil
 }
 
 // processData removes the first stream from active streams, writes out at most 16KB
@@ -911,7 +917,7 @@ func (l *loopyWriter) processData() (bool, error) {
 			return false, err
 		}
 		if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
-			return false, nil
+			return false, err
 		}
 	} else {
 		l.activeStreams.enqueue(str)
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 79ee8aea0..9826feb8c 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -444,15 +444,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
 		return nil, err
 	}
 	go func() {
-		t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
-		err := t.loopy.run()
-		if logger.V(logLevel) {
-			logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err)
-		}
-		// Do not close the transport. Let reader goroutine handle it since
-		// there might be data in the buffers.
-		t.conn.Close()
-		t.controlBuf.finish()
+		t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn)
+		t.loopy.run()
 		close(t.writerDone)
 	}()
 	return t, nil
@@ -1264,10 +1257,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
 		t.mu.Unlock()
 		return
 	}
-	if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
-		if logger.V(logLevel) {
-			logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
-		}
+	if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
+		// When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
+		// data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is
+		// enabled by default and double the configured KEEPALIVE_TIME used for new connections
+		// on that channel.
+		logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".")
 	}
 	id := f.LastStreamID
 	if id > 0 && id%2 == 0 {
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index bc3da7067..99ae1a737 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -331,14 +331,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
 	t.handleSettings(sf)
 
 	go func() {
-		t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
+		t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn)
 		t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
-		err := t.loopy.run()
-		if logger.V(logLevel) {
-			logger.Infof("transport: loopyWriter exited. Closing connection.
Err: %v", err) - } - t.conn.Close() - t.controlBuf.finish() + t.loopy.run() close(t.writerDone) }() go t.keepalive() @@ -383,7 +378,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // if false, content-type was missing or invalid isGRPC = false contentType = "" - mdata = make(map[string][]string) + mdata = make(metadata.MD, len(frame.Fields)) httpMethod string // these are set if an error is encountered while parsing the headers protocolError bool @@ -404,6 +399,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( mdata[hf.Name] = append(mdata[hf.Name], hf.Value) s.contentSubtype = contentSubtype isGRPC = true + + case "grpc-accept-encoding": + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + if hf.Value == "" { + continue + } + compressors := hf.Value + if s.clientAdvertisedCompressors != "" { + compressors = s.clientAdvertisedCompressors + "," + compressors + } + s.clientAdvertisedCompressors = compressors case "grpc-encoding": s.recvCompress = hf.Value case ":method": @@ -595,7 +601,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), - Header: metadata.MD(mdata).Copy(), + Header: mdata.Copy(), } sh.HandleRPC(s.ctx, inHeader) } @@ -1344,9 +1350,6 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return false, err } if retErr != nil { - // Abruptly close the connection following the GoAway (via - // loopywriter). But flush out what's inside the buffer first. - t.framer.writer.Flush() return false, retErr } return true, nil diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 2c601a864..8fcae4f4d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -21,6 +21,7 @@ package transport import ( "bufio" "encoding/base64" + "errors" "fmt" "io" "math" @@ -330,7 +331,8 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. - return w.conn.Write(b) + n, err = w.conn.Write(b) + return n, toIOError(err) } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) @@ -352,10 +354,30 @@ func (w *bufWriter) Flush() error { return nil } _, w.err = w.conn.Write(w.buf[:w.offset]) + w.err = toIOError(w.err) w.offset = 0 return w.err } +type ioError struct { + error +} + +func (i ioError) Unwrap() error { + return i.error +} + +func isIOError(err error) bool { + return errors.As(err, &ioError{}) +} + +func toIOError(err error) error { + if err == nil { + return nil + } + return ioError{error: err} +} + type framer struct { writer *bufWriter fr *http2.Framer diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 0ac77ea4f..1b7d7fabc 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -257,6 +257,9 @@ type Stream struct { fc *inFlow wq *writeQuota + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. This is empty for the client side stream. + clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. 
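
Aside: the grpc-accept-encoding branch in operateHeaders above folds repeated header fields into one comma-separated list on the stream. The accumulation logic in isolation, with made-up sample values:

```go
package main

import "fmt"

func main() {
	var advertised string // plays the role of s.clientAdvertisedCompressors
	for _, v := range []string{"gzip,deflate", "", "zstd"} {
		if v == "" {
			continue // empty header fields are skipped
		}
		if advertised != "" {
			v = advertised + "," + v
		}
		advertised = v
	}
	fmt.Println(advertised) // gzip,deflate,zstd
}
```
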
requestRead func(int) @@ -345,8 +348,24 @@ func (s *Stream) RecvCompress() string { } // SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(str string) { - s.sendCompress = str +func (s *Stream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SendCompress returns the send compressor name. +func (s *Stream) SendCompress() string { + return s.sendCompress +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *Stream) ClientAdvertisedCompressors() string { + return s.clientAdvertisedCompressors } // Done returns a channel which is closed when it receives the final status diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index fb4a88f59..a2cdcaf12 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -91,7 +91,11 @@ func (md MD) Len() int { // Copy returns a copy of md. func (md MD) Copy() MD { - return Join(md) + out := make(MD, len(md)) + for k, v := range md { + out[k] = copyOf(v) + } + return out } // Get obtains the values for a given key. @@ -171,8 +175,11 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) added := make([][]string, len(md.added)+1) copy(added, md.added) - added[len(added)-1] = make([]string, len(kv)) - copy(added[len(added)-1], kv) + kvCopy := make([]string, 0, len(kv)) + for i := 0; i < len(kv); i += 2 { + kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) + } + added[len(added)-1] = kvCopy return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 654e9ce69..6215e5ef2 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -41,8 +41,9 @@ var ( // TODO(bar) install dns resolver in init(){}. -// Register registers the resolver builder to the resolver map. b.Scheme will be -// used as the scheme registered with this builder. +// Register registers the resolver builder to the resolver map. b.Scheme will +// be used as the scheme registered with this builder. The registry is case +// sensitive, and schemes should not contain any uppercase characters. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. If multiple Resolvers are @@ -203,6 +204,15 @@ type State struct { // gRPC to add new methods to this interface. type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. + // + // If an error is returned, the resolver should try to resolve the + // target again. The resolver should use a backoff timer to prevent + // overloading the server with requests. If a resolver is certain that + // reresolving will not change the result, e.g. because it is + // a watch-based resolver, returned errors can be ignored. + // + // If the resolved State is the same as the last reported one, calling + // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an // error. 
The ClientConn will notify the load balancer and begin calling
@@ -280,8 +290,10 @@ type Builder interface {
 	// gRPC dial calls Build synchronously, and fails if the returned error is
 	// not nil.
 	Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
-	// Scheme returns the scheme supported by this resolver.
-	// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
+	// Scheme returns the scheme supported by this resolver. Scheme is defined
+	// at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned
+	// string should not contain uppercase characters, as they will not match
+	// the parsed target's scheme as defined in RFC 3986.
 	Scheme() string
 }
 
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index cb7020ebe..2030736a3 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -159,6 +159,7 @@ type callInfo struct {
 	contentSubtype        string
 	codec                 baseCodec
 	maxRetryRPCBufferSize int
+	onFinish              []func(err error)
 }
 
 func defaultCallInfo() *callInfo {
@@ -295,6 +296,41 @@ func (o FailFastCallOption) before(c *callInfo) error {
 }
 func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
 
+// OnFinish returns a CallOption that configures a callback to be called when
+// the call completes. The error passed to the callback is the status of the
+// RPC, and may be nil. The onFinish callback provided will only be called once
+// by gRPC. It is mainly intended for use by streaming interceptors, to be
+// notified when the RPC completes along with information about the status of
+// the RPC.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func OnFinish(onFinish func(err error)) CallOption {
+	return OnFinishCallOption{
+		OnFinish: onFinish,
+	}
+}
+
+// OnFinishCallOption is a CallOption that indicates a callback to be called
+// when the call completes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type OnFinishCallOption struct {
+	OnFinish func(error)
+}
+
+func (o OnFinishCallOption) before(c *callInfo) error {
+	c.onFinish = append(c.onFinish, o.OnFinish)
+	return nil
+}
+
+func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {}
+
 // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
 // in bytes the client can receive. If this is not set, gRPC uses the default
 // 4MB.
@@ -658,12 +694,13 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
 
 func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
 	return &stats.OutPayload{
-		Client:     client,
-		Payload:    msg,
-		Data:       data,
-		Length:     len(data),
-		WireLength: len(payload) + headerLen,
-		SentTime:   t,
+		Client:           client,
+		Payload:          msg,
+		Data:             data,
+		Length:           len(data),
+		WireLength:       len(payload) + headerLen,
+		CompressedLength: len(payload),
+		SentTime:         t,
 	}
 }
 
@@ -684,7 +721,7 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
 }
 
 type payloadInfo struct {
-	wireLength        int // The compressed length got from wire.
+	compressedLength  int // The compressed length got from wire.
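
Aside: a sketch of how a client might use the new OnFinish option. The endpoint, method name, and message stand-ins are hypothetical; only grpc.OnFinish itself comes from this patch, and it is experimental in v1.54:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	var in, out struct{} // stand-ins for real protobuf request/response types
	err = conn.Invoke(context.Background(), "/echo.Echo/UnaryEcho", &in, &out,
		grpc.OnFinish(func(err error) {
			// Invoked exactly once with the final status error of the RPC.
			log.Printf("RPC finished: %v", err)
		}))
	log.Printf("Invoke returned: %v", err)
}
```
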
uncompressedBytes []byte } @@ -694,7 +731,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, err } if payInfo != nil { - payInfo.wireLength = len(d) + payInfo.compressedLength = len(d) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index d5a6e78be..087b9ad7c 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -45,6 +45,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -74,10 +75,10 @@ func init() { srv.drainServerTransports(addr) } internal.AddGlobalServerOptions = func(opt ...ServerOption) { - extraServerOptions = append(extraServerOptions, opt...) + globalServerOptions = append(globalServerOptions, opt...) } internal.ClearGlobalServerOptions = func() { - extraServerOptions = nil + globalServerOptions = nil } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption @@ -183,7 +184,7 @@ var defaultServerOptions = serverOptions{ writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, } -var extraServerOptions []ServerOption +var globalServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -600,7 +601,7 @@ func (s *Server) stopServerWorkers() { // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions - for _, o := range extraServerOptions { + for _, o := range globalServerOptions { o.apply(&opts) } for _, o := range opt { @@ -1252,7 +1253,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. logEntry.PeerAddr = peer.Addr } for _, binlog := range binlogs { - binlog.Log(logEntry) + binlog.Log(ctx, logEntry) } } @@ -1263,6 +1264,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. var comp, decomp encoding.Compressor var cp Compressor var dc Decompressor + var sendCompressorName string // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. @@ -1283,12 +1285,18 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { cp = s.opts.cp - stream.SetSendCompress(cp.Type()) + sendCompressorName = cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. comp = encoding.GetCompressor(rc) if comp != nil { - stream.SetSendCompress(rc) + sendCompressorName = comp.Name() + } + } + + if sendCompressorName != "" { + if err := stream.SetSendCompress(sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) } } @@ -1312,11 +1320,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
} for _, sh := range shs { sh.HandleRPC(stream.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: v, - WireLength: payInfo.wireLength + headerLen, - Data: d, - Length: len(d), + RecvTime: time.Now(), + Payload: v, + Length: len(d), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Data: d, }) } if len(binlogs) != 0 { @@ -1324,7 +1333,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: d, } for _, binlog := range binlogs { - binlog.Log(cm) + binlog.Log(stream.Context(), cm) } } if trInfo != nil { @@ -1357,7 +1366,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Header: h, } for _, binlog := range binlogs { - binlog.Log(sh) + binlog.Log(stream.Context(), sh) } } st := &binarylog.ServerTrailer{ @@ -1365,7 +1374,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(st) + binlog.Log(stream.Context(), st) } } return appErr @@ -1375,6 +1384,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } opts := &transport.Options{Last: true} + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if stream.SendCompress() != sendCompressorName { + comp = encoding.GetCompressor(stream.SendCompress()) + } if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). @@ -1402,8 +1416,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(sh) - binlog.Log(st) + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), st) } } return err @@ -1417,8 +1431,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(sh) - binlog.Log(sm) + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), sm) } } if channelz.IsOn() { @@ -1430,17 +1444,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // TODO: Should we be logging if writing status failed here, like above? // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? - err = t.WriteStatus(stream, statusOK) if len(binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, } for _, binlog := range binlogs { - binlog.Log(st) + binlog.Log(stream.Context(), st) } } - return err + return t.WriteStatus(stream, statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. @@ -1574,7 +1587,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(logEntry) + binlog.Log(stream.Context(), logEntry) } } @@ -1597,12 +1610,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { ss.cp = s.opts.cp - stream.SetSendCompress(s.opts.cp.Type()) + ss.sendCompressorName = s.opts.cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. 
ss.comp = encoding.GetCompressor(rc)
 		if ss.comp != nil {
-			stream.SetSendCompress(rc)
+			ss.sendCompressorName = rc
+		}
+	}
+
+	if ss.sendCompressorName != "" {
+		if err := stream.SetSendCompress(ss.sendCompressorName); err != nil {
+			return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
 		}
 	}
 
@@ -1640,16 +1659,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 			ss.trInfo.tr.SetError()
 			ss.mu.Unlock()
 		}
-		t.WriteStatus(ss.s, appStatus)
 		if len(ss.binlogs) != 0 {
 			st := &binarylog.ServerTrailer{
 				Trailer: ss.s.Trailer(),
 				Err:     appErr,
 			}
 			for _, binlog := range ss.binlogs {
-				binlog.Log(st)
+				binlog.Log(stream.Context(), st)
 			}
 		}
+		t.WriteStatus(ss.s, appStatus)
 		// TODO: Should we log an error from WriteStatus here and below?
 		return appErr
 	}
@@ -1658,17 +1677,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
 		ss.trInfo.tr.LazyLog(stringer("OK"), false)
 		ss.mu.Unlock()
 	}
-	err = t.WriteStatus(ss.s, statusOK)
 	if len(ss.binlogs) != 0 {
 		st := &binarylog.ServerTrailer{
 			Trailer: ss.s.Trailer(),
 			Err:     appErr,
 		}
 		for _, binlog := range ss.binlogs {
-			binlog.Log(st)
+			binlog.Log(stream.Context(), st)
 		}
 	}
-	return err
+	return t.WriteStatus(ss.s, statusOK)
 }
 
 func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
@@ -1935,6 +1953,60 @@ func SendHeader(ctx context.Context, md metadata.MD) error {
 	return nil
 }
 
+// SetSendCompressor sets a compressor for outbound messages from the server.
+// It must not be called after any event that causes headers to be sent
+// (see ServerStream.SetHeader for the complete list). The provided compressor
+// is used when the following conditions are met:
+//
+//   - the compressor is registered via encoding.RegisterCompressor
+//   - the compressor name exists in the client-advertised compressor names
+//     sent in the grpc-accept-encoding header. Use ClientSupportedCompressors
+//     to get the client-supported compressor names.
+//
+// The context provided must be the context passed to the server's handler.
+// Note that the compressor name encoding.Identity disables outbound
+// compression.
+// By default, server messages will be sent using the same compressor with
+// which request messages were sent.
+//
+// It is not safe to call SetSendCompressor concurrently with SendHeader and
+// SendMsg.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func SetSendCompressor(ctx context.Context, name string) error {
+	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
+	if !ok || stream == nil {
+		return fmt.Errorf("failed to fetch the stream from the given context")
+	}
+
+	if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil {
+		return fmt.Errorf("unable to set send compressor: %w", err)
+	}
+
+	return stream.SetSendCompress(name)
+}
+
+// ClientSupportedCompressors returns compressor names advertised by the client
+// via the grpc-accept-encoding header.
+//
+// The context provided must be the context passed to the server's handler.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ClientSupportedCompressors(ctx context.Context) ([]string, error) { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) + } + + return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil +} + // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. // @@ -1969,3 +2041,22 @@ type channelzServer struct { func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { return c.s.channelzMetric() } + +// validateSendCompressor returns an error when given compressor name cannot be +// handled by the server or the client based on the advertised compressors. +func validateSendCompressor(name, clientCompressors string) error { + if name == encoding.Identity { + return nil + } + + if !grpcutil.IsCompressorNameRegistered(name) { + return fmt.Errorf("compressor not registered %q", name) + } + + for _, c := range strings.Split(clientCompressors, ",") { + if c == name { + return nil // found match + } + } + return fmt.Errorf("client does not support compressor %q", name) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 0285dcc6a..7a552a9b7 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -67,10 +67,18 @@ type InPayload struct { Payload interface{} // Data is the serialized message payload. Data []byte - // Length is the length of uncompressed data. + + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int + // RecvTime is the time when the payload is received. RecvTime time.Time } @@ -129,9 +137,15 @@ type OutPayload struct { Payload interface{} // Data is the serialized message payload. Data []byte - // Length is the length of uncompressed data. + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int // SentTime is the time when the payload is sent. 
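
Aside: putting the two new server helpers together, a handler-side sketch of response-compression negotiation. The helper name and control flow are illustrative; grpc.ClientSupportedCompressors and grpc.SetSendCompressor are the experimental APIs added by this patch:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

// pickResponseCompression would run inside a registered handler; ctx must
// be the context gRPC passed to that handler, and it must be called before
// any headers have been sent.
func pickResponseCompression(ctx context.Context) {
	names, err := grpc.ClientSupportedCompressors(ctx)
	if err != nil {
		log.Printf("no server stream in context: %v", err)
		return
	}
	for _, n := range names {
		if n == "gzip" {
			if err := grpc.SetSendCompressor(ctx, "gzip"); err != nil {
				log.Printf("could not enable gzip: %v", err)
			}
			return
		}
	}
}

func main() {} // placeholder so the sketch compiles standalone
```
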
SentTime time.Time diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 93231af2a..d1226a412 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -168,10 +168,19 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { - if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + // validate md if err := imetadata.Validate(md); err != nil { return nil, status.Error(codes.Internal, err.Error()) } + // validate added + for _, kvs := range added { + for i := 0; i < len(kvs); i += 2 { + if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + } } if channelz.IsOn() { cc.incrCallsStarted() @@ -352,7 +361,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } } for _, binlog := range cs.binlogs { - binlog.Log(logEntry) + binlog.Log(cs.ctx, logEntry) } } @@ -800,7 +809,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { } cs.serverHeaderBinlogged = true for _, binlog := range cs.binlogs { - binlog.Log(logEntry) + binlog.Log(cs.ctx, logEntry) } } return m, nil @@ -881,7 +890,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { Message: data, } for _, binlog := range cs.binlogs { - binlog.Log(cm) + binlog.Log(cs.ctx, cm) } } return err @@ -905,7 +914,7 @@ func (cs *clientStream) RecvMsg(m interface{}) error { Message: recvInfo.uncompressedBytes, } for _, binlog := range cs.binlogs { - binlog.Log(sm) + binlog.Log(cs.ctx, sm) } } if err != nil || !cs.desc.ServerStreams { @@ -926,7 +935,7 @@ func (cs *clientStream) RecvMsg(m interface{}) error { logEntry.PeerAddr = peer.Addr } for _, binlog := range cs.binlogs { - binlog.Log(logEntry) + binlog.Log(cs.ctx, logEntry) } } } @@ -953,7 +962,7 @@ func (cs *clientStream) CloseSend() error { OnClientSide: true, } for _, binlog := range cs.binlogs { - binlog.Log(chc) + binlog.Log(cs.ctx, chc) } } // We never returned an error here for reasons. @@ -971,6 +980,9 @@ func (cs *clientStream) finish(err error) { return } cs.finished = true + for _, onFinish := range cs.callInfo.onFinish { + onFinish(err) + } cs.commitAttemptLocked() if cs.attempt != nil { cs.attempt.finish(err) @@ -992,7 +1004,7 @@ func (cs *clientStream) finish(err error) { OnClientSide: true, } for _, binlog := range cs.binlogs { - binlog.Log(c) + binlog.Log(cs.ctx, c) } } if err == nil { @@ -1081,9 +1093,10 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { RecvTime: time.Now(), Payload: m, // TODO truncate large payload. 
- Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Length: len(payInfo.uncompressedBytes), }) } if channelz.IsOn() { @@ -1511,6 +1524,8 @@ type serverStream struct { comp encoding.Compressor decomp encoding.Compressor + sendCompressorName string + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo @@ -1558,7 +1573,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { } ss.serverHeaderBinlogged = true for _, binlog := range ss.binlogs { - binlog.Log(sh) + binlog.Log(ss.ctx, sh) } } return err @@ -1603,6 +1618,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } }() + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { + ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.sendCompressorName = sendCompressorsName + } + // load hdr, payload, data hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) if err != nil { @@ -1624,14 +1646,14 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } ss.serverHeaderBinlogged = true for _, binlog := range ss.binlogs { - binlog.Log(sh) + binlog.Log(ss.ctx, sh) } } sm := &binarylog.ServerMessage{ Message: data, } for _, binlog := range ss.binlogs { - binlog.Log(sm) + binlog.Log(ss.ctx, sm) } } if len(ss.statsHandler) != 0 { @@ -1679,7 +1701,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} for _, binlog := range ss.binlogs { - binlog.Log(chc) + binlog.Log(ss.ctx, chc) } } return err @@ -1695,9 +1717,10 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { RecvTime: time.Now(), Payload: m, // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + Length: len(payInfo.uncompressedBytes), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, }) } } @@ -1706,7 +1729,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { Message: payInfo.uncompressedBytes, } for _, binlog := range ss.binlogs { - binlog.Log(cm) + binlog.Log(ss.ctx, cm) } } return nil diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index fe552c315..3c6e3c911 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.53.0" +const Version = "1.54.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 3728aed04..a8e4732b3 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -41,16 +41,8 @@ if [[ "$1" = "-install" ]]; then github.com/client9/misspell/cmd/misspell popd if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${TRAVIS}" = "true" ]]; then - PROTOBUF_VERSION=3.14.0 - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/travis - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=3.14.0 + if [[ "${GITHUB_ACTIONS}" = "true" ]]; then + PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/runner/go wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} @@ -68,8 +60,7 @@ fi # - Check that generated proto files are up to date. if [[ -z "${VET_SKIP_PROTO}" ]]; then - PATH="/home/travis/bin:${PATH}" make proto && \ - git status --porcelain 2>&1 | fail_on_output || \ + make proto && git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) fi diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go index 00ea2fecf..21d5d2cb1 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -4,7 +4,7 @@ // Package protojson marshals and unmarshals protocol buffer messages as JSON // format. It follows the guide at -// https://developers.google.com/protocol-buffers/docs/proto3#json. +// https://protobuf.dev/programming-guides/proto3#json. // // This package produces a different output than the standard "encoding/json" // package, which does not operate correctly on protocol buffer messages. diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index c85f84694..6c37d4174 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -814,16 +814,22 @@ func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { return d.unexpectedTokenError(tok) } - t, err := time.Parse(time.RFC3339Nano, tok.ParsedString()) + s := tok.ParsedString() + t, err := time.Parse(time.RFC3339Nano, s) if err != nil { return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) } - // Validate seconds. No need to validate nanos because time.Parse would have - // covered that already. + // Validate seconds. secs := t.Unix() if secs < minTimestampSeconds || secs > maxTimestampSeconds { return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) } + // Validate subseconds. 
+ i := strings.LastIndexByte(s, '.') // start of subsecond field + j := strings.LastIndexAny(s, "Z-+") // start of timezone field + if i >= 0 && j >= i && j-i > len(".999999999") { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } fds := m.Descriptor().Fields() fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index ce57f57eb..f4b4686cf 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package protowire parses and formats the raw wire encoding. -// See https://developers.google.com/protocol-buffers/docs/encoding. +// See https://protobuf.dev/programming-guides/encoding. // // For marshaling and unmarshaling entire protobuf messages, // use the "google.golang.org/protobuf/proto" package instead. @@ -29,12 +29,8 @@ const ( ) // IsValid reports whether the field number is semantically valid. -// -// Note that while numbers within the reserved range are semantically invalid, -// they are syntactically valid in the wire format. -// Implementations may treat records with reserved field numbers as unknown. func (n Number) IsValid() bool { - return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber + return MinValidNumber <= n && n <= MaxValidNumber } // Type represents the wire type. diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index b13fd29e8..d043a6ebe 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -294,7 +294,7 @@ func (d *Decoder) isValueNext() bool { } // consumeToken constructs a Token for given Kind with raw value derived from -// current d.in and given size, and consumes the given size-lenght of it. +// current d.in and given size, and consumes the given size-length of it. func (d *Decoder) consumeToken(kind Kind, size int) Token { tok := Token{ kind: kind, diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 427c62d03..87853e786 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -412,12 +412,13 @@ func (d *Decoder) parseFieldName() (tok Token, err error) { // Field number. Identify if input is a valid number that is not negative // and is decimal integer within 32-bit range. 
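The unmarshalTimestamp hunk above keeps time.Parse for the heavy lifting but adds its own subsecond check: time.Parse(time.RFC3339Nano, ...) accepts more than nine fractional digits, so over-precise timestamps used to slip through even though the JSON mapping caps Timestamp at nanosecond precision. A sketch of the resulting behavior, assuming I read the new length check on the span between '.' and the timezone correctly:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	var ts timestamppb.Timestamp

	// Nine fractional digits (nanoseconds) remain valid.
	fmt.Println(protojson.Unmarshal([]byte(`"2023-04-05T07:11:27.123456789Z"`), &ts))

	// Ten digits parse under time.RFC3339Nano, so they previously got
	// through; the new subsecond check rejects them.
	fmt.Println(protojson.Unmarshal([]byte(`"2023-04-05T07:11:27.1234567890Z"`), &ts))
}

The protowire change in the same chunk is also behavioral: Number.IsValid no longer rejects the reserved range, so callers that relied on it to filter out reserved field numbers (19000-19999) must now compare against FirstReservedNumber and LastReservedNumber themselves.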
if num := parseNumber(d.in); num.size > 0 { + str := num.string(d.in) if !num.neg && num.kind == numDec { - if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil { + if _, err := strconv.ParseInt(str, 10, 32); err == nil { return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil } } - return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) + return Token{}, d.newSyntaxError("invalid field number: %s", str) } return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in)) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go index 81a5d8c86..45c81f029 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go @@ -15,17 +15,12 @@ func (d *Decoder) parseNumberValue() (Token, bool) { if num.neg { numAttrs |= isNegative } - strSize := num.size - last := num.size - 1 - if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') { - strSize = last - } tok := Token{ kind: Scalar, attrs: numberValue, pos: len(d.orig) - len(d.in), raw: d.in[:num.size], - str: string(d.in[:strSize]), + str: num.string(d.in), numAttrs: numAttrs, } d.consume(num.size) @@ -46,6 +41,27 @@ type number struct { kind uint8 neg bool size int + // if neg, this is the length of whitespace and comments between + // the minus sign and the rest fo the number literal + sep int +} + +func (num number) string(data []byte) string { + strSize := num.size + last := num.size - 1 + if num.kind == numFloat && (data[last] == 'f' || data[last] == 'F') { + strSize = last + } + if num.neg && num.sep > 0 { + // strip whitespace/comments between negative sign and the rest + strLen := strSize - num.sep + str := make([]byte, strLen) + str[0] = data[0] + copy(str[1:], data[num.sep+1:strSize]) + return string(str) + } + return string(data[:strSize]) + } // parseNumber constructs a number object from given input. It allows for the @@ -67,19 +83,22 @@ func parseNumber(input []byte) number { } // Optional - + var sep int if s[0] == '-' { neg = true s = s[1:] size++ + // Consume any whitespace or comments between the + // negative sign and the rest of the number + lenBefore := len(s) + s = consume(s, 0) + sep = lenBefore - len(s) + size += sep if len(s) == 0 { return number{} } } - // C++ allows for whitespace and comments in between the negative sign and - // the rest of the number. This logic currently does not but is consistent - // with v1. 
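The decode_number.go rework above records the length of any separator after a minus sign, so the text parser now matches the C++ implementation by tolerating whitespace and comments between '-' and the digits; number.string then strips that separator (and a trailing 'f'/'F' float suffix) before the literal reaches strconv. If I read the new parseNumber correctly, the user-visible effect looks like this; FieldDescriptorProto is used only as a convenient message with an int32 field:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	var fd descriptorpb.FieldDescriptorProto

	// Whitespace between the minus sign and the digits now parses;
	// previously this was a syntax error in the Go implementation.
	err := prototext.Unmarshal([]byte("number: -   42"), &fd)
	fmt.Println(fd.GetNumber(), err) // -42 <nil>
}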
- switch { case s[0] == '0': if len(s) > 1 { @@ -116,7 +135,7 @@ func parseNumber(input []byte) number { if len(s) > 0 && !isDelim(s[0]) { return number{} } - return number{kind: kind, neg: neg, size: size} + return number{kind: kind, neg: neg, size: size, sep: sep} } } s = s[1:] @@ -188,5 +207,5 @@ func parseNumber(input []byte) number { return number{} } - return number{kind: kind, neg: neg, size: size} + return number{kind: kind, neg: neg, size: size, sep: sep} } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index e3cdf1c20..5c0e8f73f 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -50,6 +50,7 @@ const ( FileDescriptorProto_Options_field_name protoreflect.Name = "options" FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" + FileDescriptorProto_Edition_field_name protoreflect.Name = "edition" FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" @@ -63,6 +64,7 @@ const ( FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" + FileDescriptorProto_Edition_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.edition" ) // Field numbers for google.protobuf.FileDescriptorProto. @@ -79,6 +81,7 @@ const ( FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 + FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13 ) // Names for google.protobuf.DescriptorProto. @@ -494,26 +497,29 @@ const ( // Field names for google.protobuf.MessageOptions. 
const ( - MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" - MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" - MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" - MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" - MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" - MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" - MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" - MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" - MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" + MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" + MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" + MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ) // Field numbers for google.protobuf.MessageOptions. const ( - MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 - MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 - MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 - MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 - MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 + MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 + MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 + MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) // Names for google.protobuf.FieldOptions. 
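These genid constants are generated from descriptor.proto, so each new option shows up in three parallel lists: short name, full name, and field number. A quick cross-check of one addition against the runtime descriptor, using the number 11 from the hunk above:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	md := (&descriptorpb.MessageOptions{}).ProtoReflect().Descriptor()
	fd := md.Fields().ByName("deprecated_legacy_json_field_conflicts")
	// Prints: 11 google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts
	fmt.Println(fd.Number(), fd.FullName())
}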
@@ -528,16 +534,24 @@ const ( FieldOptions_Packed_field_name protoreflect.Name = "packed" FieldOptions_Jstype_field_name protoreflect.Name = "jstype" FieldOptions_Lazy_field_name protoreflect.Name = "lazy" + FieldOptions_UnverifiedLazy_field_name protoreflect.Name = "unverified_lazy" FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" FieldOptions_Weak_field_name protoreflect.Name = "weak" + FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + FieldOptions_Retention_field_name protoreflect.Name = "retention" + FieldOptions_Target_field_name protoreflect.Name = "target" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" + FieldOptions_UnverifiedLazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.unverified_lazy" FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" + FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" + FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" + FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -547,8 +561,12 @@ const ( FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 + FieldOptions_UnverifiedLazy_field_number protoreflect.FieldNumber = 15 FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 + FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 + FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 + FieldOptions_Target_field_number protoreflect.FieldNumber = 18 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -564,6 +582,18 @@ const ( FieldOptions_JSType_enum_name = "JSType" ) +// Full and short names for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" + FieldOptions_OptionRetention_enum_name = "OptionRetention" +) + +// Full and short names for google.protobuf.FieldOptions.OptionTargetType. +const ( + FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" + FieldOptions_OptionTargetType_enum_name = "OptionTargetType" +) + // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -590,20 +620,23 @@ const ( // Field names for google.protobuf.EnumOptions. 
const ( - EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" - EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" - EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" + EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" - EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" - EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" + EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" + EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" + EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumOptions. const ( - EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 - EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 - EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 + EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 + EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) // Names for google.protobuf.EnumValueOptions. @@ -813,11 +846,13 @@ const ( GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" + GeneratedCodeInfo_Annotation_Semantic_field_name protoreflect.Name = "semantic" GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" + GeneratedCodeInfo_Annotation_Semantic_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.semantic" ) // Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. @@ -826,4 +861,11 @@ const ( GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 + GeneratedCodeInfo_Annotation_Semantic_field_number protoreflect.FieldNumber = 5 +) + +// Full and short names for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. 
+const ( + GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" + GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 11a6128ba..185ef2efa 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -59,7 +59,6 @@ func NewConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { default: return newSingularConverter(t, fd) } - panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) } var ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index fea589c45..61a84d341 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -87,7 +87,7 @@ func (sb *Builder) grow(n int) { // Unlike strings.Builder, we do not need to copy over the contents // of the old buffer since our builder provides no API for // retrieving previously created strings. - sb.buf = make([]byte, 2*(cap(sb.buf)+n)) + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) } func (sb *Builder) last(n int) string { diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index b480c5010..f7014cd51 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 28 - Patch = 1 + Minor = 30 + Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index 08d2a46f5..ec71e717f 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -5,16 +5,13 @@ // Package proto provides functions operating on protocol buffer messages. // // For documentation on protocol buffers in general, see: -// -// https://developers.google.com/protocol-buffers +// https://protobuf.dev. // // For a tutorial on using protocol buffers with Go, see: -// -// https://developers.google.com/protocol-buffers/docs/gotutorial +// https://protobuf.dev/getting-started/gotutorial. // // For a guide to generated Go protocol buffer code, see: -// -// https://developers.google.com/protocol-buffers/docs/reference/go-generated +// https://protobuf.dev/reference/go/go-generated. // // # Binary serialization // diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 67948dd1d..1a0be1b03 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -5,30 +5,39 @@ package proto import ( - "bytes" - "math" "reflect" - "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/reflect/protoreflect" ) -// Equal reports whether two messages are equal. -// If two messages marshal to the same bytes under deterministic serialization, -// then Equal is guaranteed to report true. +// Equal reports whether two messages are equal, +// by recursively comparing the fields of the message. 
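Among the smaller hunks above, the strs.Builder fix deserves a note: the old grow allocated the replacement buffer with make([]byte, 2*(cap(sb.buf)+n)), i.e. with a nonzero length, which leaves no spare capacity at all (len == cap) and strands a zeroed prefix in the buffer; the fix reserves capacity only. A standalone illustration of the len-versus-cap difference:

package main

import "fmt"

func main() {
	// Old shape: length is set, so append writes after the zeroed prefix
	// and there is no spare capacity to grow into.
	old := make([]byte, 2*(0+4))
	old = append(old, "abcd"...)
	fmt.Println(len(old), old[:4]) // 12 [0 0 0 0]

	// Fixed shape: zero length, reserved capacity.
	buf := make([]byte, 0, 2*(0+4))
	buf = append(buf, "abcd"...)
	fmt.Println(len(buf), string(buf)) // 4 abcd
}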
// -// Two messages are equal if they belong to the same message descriptor, -// have the same set of populated known and extension field values, -// and the same set of unknown fields values. If either of the top-level -// messages are invalid, then Equal reports true only if both are invalid. +// - Bytes fields are equal if they contain identical bytes. +// Empty bytes (regardless of nil-ness) are considered equal. // -// Scalar values are compared with the equivalent of the == operator in Go, -// except bytes values which are compared using bytes.Equal and -// floating point values which specially treat NaNs as equal. -// Message values are compared by recursively calling Equal. -// Lists are equal if each element value is also equal. -// Maps are equal if they have the same set of keys, where the pair of values -// for each key is also equal. +// - Floating-point fields are equal if they contain the same value. +// Unlike the == operator, a NaN is equal to another NaN. +// +// - Other scalar fields are equal if they contain the same value. +// +// - Message fields are equal if they have +// the same set of populated known and extension field values, and +// the same set of unknown fields values. +// +// - Lists are equal if they are the same length and +// each corresponding element is equal. +// +// - Maps are equal if they have the same set of keys and +// the corresponding value for each key is equal. +// +// An invalid message is not equal to a valid message. +// An invalid message is only equal to another invalid message of the +// same type. An invalid message often corresponds to a nil pointer +// of the concrete message type. For example, (*pb.M)(nil) is not equal +// to &pb.M{}. +// If two valid messages marshal to the same bytes under deterministic +// serialization, then Equal is guaranteed to report true. func Equal(x, y Message) bool { if x == nil || y == nil { return x == nil && y == nil @@ -42,130 +51,7 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } - return equalMessage(mx, my) -} - -// equalMessage compares two messages. -func equalMessage(mx, my protoreflect.Message) bool { - if mx.Descriptor() != my.Descriptor() { - return false - } - - nx := 0 - equal := true - mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { - nx++ - vy := my.Get(fd) - equal = my.Has(fd) && equalField(fd, vx, vy) - return equal - }) - if !equal { - return false - } - ny := 0 - my.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { - ny++ - return true - }) - if nx != ny { - return false - } - - return equalUnknown(mx.GetUnknown(), my.GetUnknown()) -} - -// equalField compares two fields. -func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { - switch { - case fd.IsList(): - return equalList(fd, x.List(), y.List()) - case fd.IsMap(): - return equalMap(fd, x.Map(), y.Map()) - default: - return equalValue(fd, x, y) - } -} - -// equalMap compares two maps. -func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool { - if x.Len() != y.Len() { - return false - } - equal := true - x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { - vy := y.Get(k) - equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) - return equal - }) - return equal -} - -// equalList compares two lists. 
-func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool { - if x.Len() != y.Len() { - return false - } - for i := x.Len() - 1; i >= 0; i-- { - if !equalValue(fd, x.Get(i), y.Get(i)) { - return false - } - } - return true -} - -// equalValue compares two singular values. -func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { - switch fd.Kind() { - case protoreflect.BoolKind: - return x.Bool() == y.Bool() - case protoreflect.EnumKind: - return x.Enum() == y.Enum() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, - protoreflect.Int64Kind, protoreflect.Sint64Kind, - protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: - return x.Int() == y.Int() - case protoreflect.Uint32Kind, protoreflect.Uint64Kind, - protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: - return x.Uint() == y.Uint() - case protoreflect.FloatKind, protoreflect.DoubleKind: - fx := x.Float() - fy := y.Float() - if math.IsNaN(fx) || math.IsNaN(fy) { - return math.IsNaN(fx) && math.IsNaN(fy) - } - return fx == fy - case protoreflect.StringKind: - return x.String() == y.String() - case protoreflect.BytesKind: - return bytes.Equal(x.Bytes(), y.Bytes()) - case protoreflect.MessageKind, protoreflect.GroupKind: - return equalMessage(x.Message(), y.Message()) - default: - return x.Interface() == y.Interface() - } -} - -// equalUnknown compares unknown fields by direct comparison on the raw bytes -// of each individual field number. -func equalUnknown(x, y protoreflect.RawFields) bool { - if len(x) != len(y) { - return false - } - if bytes.Equal([]byte(x), []byte(y)) { - return true - } - - mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) - my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) - for len(x) > 0 { - fnum, _, n := protowire.ConsumeField(x) - mx[fnum] = append(mx[fnum], x[:n]...) - x = x[n:] - } - for len(y) > 0 { - fnum, _, n := protowire.ConsumeField(y) - my[fnum] = append(my[fnum], y[:n]...) 
- y = y[n:] - } - return reflect.DeepEqual(mx, my) + vx := protoreflect.ValueOfMessage(mx) + vy := protoreflect.ValueOfMessage(my) + return vx.Equal(vy) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index b03c1223c..54ce326df 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -35,6 +35,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) case 12: b = p.appendSingularField(b, "syntax", nil) + case 13: + b = p.appendSingularField(b, "edition", nil) } return b } @@ -236,6 +238,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 7: b = p.appendSingularField(b, "map_entry", nil) + case 11: + b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -279,6 +283,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { b = p.appendSingularField(b, "allow_alias", nil) case 3: b = p.appendSingularField(b, "deprecated", nil) + case 6: + b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -345,10 +351,18 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "jstype", nil) case 5: b = p.appendSingularField(b, "lazy", nil) + case 15: + b = p.appendSingularField(b, "unverified_lazy", nil) case 3: b = p.appendSingularField(b, "deprecated", nil) case 10: b = p.appendSingularField(b, "weak", nil) + case 16: + b = p.appendSingularField(b, "debug_redact", nil) + case 17: + b = p.appendSingularField(b, "retention", nil) + case 18: + b = p.appendSingularField(b, "target", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index f31981077..37601b781 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -148,7 +148,7 @@ type Message interface { // be preserved in marshaling or other operations. IsValid() bool - // ProtoMethods returns optional fast-path implementions of various operations. + // ProtoMethods returns optional fast-path implementations of various operations. // This method may return nil. // // The returned methods type is identical to diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go new file mode 100644 index 000000000..591652541 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -0,0 +1,168 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "bytes" + "fmt" + "math" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" +) + +// Equal reports whether v1 and v2 are recursively equal. 
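equal.go above now delegates to the new protoreflect.Value.Equal in value_equal.go (whose documentation and implementation follow), keeping proto.Equal's documented semantics while moving the recursion next to the Value type. The invalid-message rule spelled out in the rewritten doc comment is the case that most often surprises callers:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	a, b := durationpb.New(time.Second), durationpb.New(time.Second)
	fmt.Println(proto.Equal(a, b)) // true: same populated fields

	// A typed nil is an invalid message; it is never equal to a valid,
	// merely empty message of the same type.
	var nilMsg *durationpb.Duration
	fmt.Println(proto.Equal(nilMsg, &durationpb.Duration{})) // false
}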
+// +// - Values of different types are always unequal. +// +// - Bytes values are equal if they contain identical bytes. +// Empty bytes (regardless of nil-ness) are considered equal. +// +// - Floating point values are equal if they contain the same value. +// Unlike the == operator, a NaN is equal to another NaN. +// +// - Enums are equal if they contain the same number. +// Since Value does not contain an enum descriptor, +// enum values do not consider the type of the enum. +// +// - Other scalar values are equal if they contain the same value. +// +// - Message values are equal if they belong to the same message descriptor, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// - Lists are equal if they are the same length and +// each corresponding element is equal. +// +// - Maps are equal if they have the same set of keys and +// the corresponding value for each key is equal. +func (v1 Value) Equal(v2 Value) bool { + return equalValue(v1, v2) +} + +func equalValue(x, y Value) bool { + eqType := x.typ == y.typ + switch x.typ { + case nilType: + return eqType + case boolType: + return eqType && x.Bool() == y.Bool() + case int32Type, int64Type: + return eqType && x.Int() == y.Int() + case uint32Type, uint64Type: + return eqType && x.Uint() == y.Uint() + case float32Type, float64Type: + return eqType && equalFloat(x.Float(), y.Float()) + case stringType: + return eqType && x.String() == y.String() + case bytesType: + return eqType && bytes.Equal(x.Bytes(), y.Bytes()) + case enumType: + return eqType && x.Enum() == y.Enum() + default: + switch x := x.Interface().(type) { + case Message: + y, ok := y.Interface().(Message) + return ok && equalMessage(x, y) + case List: + y, ok := y.Interface().(List) + return ok && equalList(x, y) + case Map: + y, ok := y.Interface().(Map) + return ok && equalMap(x, y) + default: + panic(fmt.Sprintf("unknown type: %T", x)) + } + } +} + +// equalFloat compares two floats, where NaNs are treated as equal. +func equalFloat(x, y float64) bool { + if math.IsNaN(x) || math.IsNaN(y) { + return math.IsNaN(x) && math.IsNaN(y) + } + return x == y +} + +// equalMessage compares two messages. +func equalMessage(mx, my Message) bool { + if mx.Descriptor() != my.Descriptor() { + return false + } + + nx := 0 + equal := true + mx.Range(func(fd FieldDescriptor, vx Value) bool { + nx++ + vy := my.Get(fd) + equal = my.Has(fd) && equalValue(vx, vy) + return equal + }) + if !equal { + return false + } + ny := 0 + my.Range(func(fd FieldDescriptor, vx Value) bool { + ny++ + return true + }) + if nx != ny { + return false + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +// equalList compares two lists. +func equalList(x, y List) bool { + if x.Len() != y.Len() { + return false + } + for i := x.Len() - 1; i >= 0; i-- { + if !equalValue(x.Get(i), y.Get(i)) { + return false + } + } + return true +} + +// equalMap compares two maps. +func equalMap(x, y Map) bool { + if x.Len() != y.Len() { + return false + } + equal := true + x.Range(func(k MapKey, vx Value) bool { + vy := y.Get(k) + equal = y.Has(k) && equalValue(vx, vy) + return equal + }) + return equal +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. 
+func equalUnknown(x, y RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[FieldNumber]RawFields) + my := make(map[FieldNumber]RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + return reflect.DeepEqual(mx, my) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index ca8e28c5b..08e5ef73f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -54,11 +54,11 @@ import ( // // Append a 0 to a "repeated int32" field. // // Since the Value returned by Mutable is guaranteed to alias // // the source message, modifying the Value modifies the message. -// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0)) +// message.Mutable(fieldDesc).List().Append(protoreflect.ValueOfInt32(0)) // // // Assign [0] to a "repeated int32" field by creating a new Value, // // modifying it, and assigning it. -// list := message.NewField(fieldDesc).(List) +// list := message.NewField(fieldDesc).List() // list.Append(protoreflect.ValueOfInt32(0)) // message.Set(fieldDesc, list) // // ERROR: Since it is not defined whether Set aliases the source, diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 58352a697..aeb559774 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -46,7 +46,7 @@ var conflictPolicy = "panic" // "panic" | "warn" | "ignore" // It is a variable so that the behavior is easily overridden in another file. var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" - const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict" + const faq = "https://protobuf.dev/reference/go/faq#namespace-conflict" policy := conflictPolicy if v := os.Getenv(env); v != "" { policy = v diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index abe4ab511..dac5671db 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -406,6 +406,152 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} } +// If set to RETENTION_SOURCE, the option will be omitted from the binary. +// Note: as of January 2023, support for this is in progress and does not yet +// have an effect (b/264593489). +type FieldOptions_OptionRetention int32 + +const ( + FieldOptions_RETENTION_UNKNOWN FieldOptions_OptionRetention = 0 + FieldOptions_RETENTION_RUNTIME FieldOptions_OptionRetention = 1 + FieldOptions_RETENTION_SOURCE FieldOptions_OptionRetention = 2 +) + +// Enum value maps for FieldOptions_OptionRetention. 
+var ( + FieldOptions_OptionRetention_name = map[int32]string{ + 0: "RETENTION_UNKNOWN", + 1: "RETENTION_RUNTIME", + 2: "RETENTION_SOURCE", + } + FieldOptions_OptionRetention_value = map[string]int32{ + "RETENTION_UNKNOWN": 0, + "RETENTION_RUNTIME": 1, + "RETENTION_SOURCE": 2, + } +) + +func (x FieldOptions_OptionRetention) Enum() *FieldOptions_OptionRetention { + p := new(FieldOptions_OptionRetention) + *p = x + return p +} + +func (x FieldOptions_OptionRetention) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() +} + +func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[5] +} + +func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_OptionRetention) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_OptionRetention(num) + return nil +} + +// Deprecated: Use FieldOptions_OptionRetention.Descriptor instead. +func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 2} +} + +// This indicates the types of entities that the field may apply to when used +// as an option. If it is unset, then the field may be freely used as an +// option on any kind of entity. Note: as of January 2023, support for this is +// in progress and does not yet have an effect (b/264593489). +type FieldOptions_OptionTargetType int32 + +const ( + FieldOptions_TARGET_TYPE_UNKNOWN FieldOptions_OptionTargetType = 0 + FieldOptions_TARGET_TYPE_FILE FieldOptions_OptionTargetType = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE FieldOptions_OptionTargetType = 2 + FieldOptions_TARGET_TYPE_MESSAGE FieldOptions_OptionTargetType = 3 + FieldOptions_TARGET_TYPE_FIELD FieldOptions_OptionTargetType = 4 + FieldOptions_TARGET_TYPE_ONEOF FieldOptions_OptionTargetType = 5 + FieldOptions_TARGET_TYPE_ENUM FieldOptions_OptionTargetType = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY FieldOptions_OptionTargetType = 7 + FieldOptions_TARGET_TYPE_SERVICE FieldOptions_OptionTargetType = 8 + FieldOptions_TARGET_TYPE_METHOD FieldOptions_OptionTargetType = 9 +) + +// Enum value maps for FieldOptions_OptionTargetType. 
+var ( + FieldOptions_OptionTargetType_name = map[int32]string{ + 0: "TARGET_TYPE_UNKNOWN", + 1: "TARGET_TYPE_FILE", + 2: "TARGET_TYPE_EXTENSION_RANGE", + 3: "TARGET_TYPE_MESSAGE", + 4: "TARGET_TYPE_FIELD", + 5: "TARGET_TYPE_ONEOF", + 6: "TARGET_TYPE_ENUM", + 7: "TARGET_TYPE_ENUM_ENTRY", + 8: "TARGET_TYPE_SERVICE", + 9: "TARGET_TYPE_METHOD", + } + FieldOptions_OptionTargetType_value = map[string]int32{ + "TARGET_TYPE_UNKNOWN": 0, + "TARGET_TYPE_FILE": 1, + "TARGET_TYPE_EXTENSION_RANGE": 2, + "TARGET_TYPE_MESSAGE": 3, + "TARGET_TYPE_FIELD": 4, + "TARGET_TYPE_ONEOF": 5, + "TARGET_TYPE_ENUM": 6, + "TARGET_TYPE_ENUM_ENTRY": 7, + "TARGET_TYPE_SERVICE": 8, + "TARGET_TYPE_METHOD": 9, + } +) + +func (x FieldOptions_OptionTargetType) Enum() *FieldOptions_OptionTargetType { + p := new(FieldOptions_OptionTargetType) + *p = x + return p +} + +func (x FieldOptions_OptionTargetType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() +} + +func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[6] +} + +func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_OptionTargetType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_OptionTargetType(num) + return nil +} + +// Deprecated: Use FieldOptions_OptionTargetType.Descriptor instead. +func (FieldOptions_OptionTargetType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 3} +} + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, // or neither? HTTP based RPC implementation may choose GET verb for safe // methods, and PUT verb for idempotent methods instead of the default POST. @@ -442,11 +588,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -468,6 +614,70 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} } +// Represents the identified object's effect on the element in the original +// .proto file. +type GeneratedCodeInfo_Annotation_Semantic int32 + +const ( + // There is no effect or the effect is indescribable. + GeneratedCodeInfo_Annotation_NONE GeneratedCodeInfo_Annotation_Semantic = 0 + // The element is set or otherwise mutated. + GeneratedCodeInfo_Annotation_SET GeneratedCodeInfo_Annotation_Semantic = 1 + // An alias to the element is returned. + GeneratedCodeInfo_Annotation_ALIAS GeneratedCodeInfo_Annotation_Semantic = 2 +) + +// Enum value maps for GeneratedCodeInfo_Annotation_Semantic. 
+var ( + GeneratedCodeInfo_Annotation_Semantic_name = map[int32]string{ + 0: "NONE", + 1: "SET", + 2: "ALIAS", + } + GeneratedCodeInfo_Annotation_Semantic_value = map[string]int32{ + "NONE": 0, + "SET": 1, + "ALIAS": 2, + } +) + +func (x GeneratedCodeInfo_Annotation_Semantic) Enum() *GeneratedCodeInfo_Annotation_Semantic { + p := new(GeneratedCodeInfo_Annotation_Semantic) + *p = x + return p +} + +func (x GeneratedCodeInfo_Annotation_Semantic) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() +} + +func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[8] +} + +func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GeneratedCodeInfo_Annotation_Semantic(num) + return nil +} + +// Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. +func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0} +} + // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. type FileDescriptorSet struct { @@ -544,8 +754,12 @@ type FileDescriptorProto struct { // development tools. SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` // The syntax of the proto file. - // The supported values are "proto2" and "proto3". + // The supported values are "proto2", "proto3", and "editions". + // + // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + // The edition of the proto file, which is an opaque string. + Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"` } func (x *FileDescriptorProto) Reset() { @@ -664,6 +878,13 @@ func (x *FileDescriptorProto) GetSyntax() string { return "" } +func (x *FileDescriptorProto) GetEdition() string { + if x != nil && x.Edition != nil { + return *x.Edition + } + return "" +} + // Describes a message type. type DescriptorProto struct { state protoimpl.MessageState @@ -860,7 +1081,6 @@ type FieldDescriptorProto struct { // For booleans, "true" or "false". // For strings, contains the default text contents (not escaped in any way). // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` // If set, gives the index of a oneof in the containing type's oneof_decl // list. This field is a member of that oneof. @@ -1382,22 +1602,22 @@ type FileOptions struct { // inappropriate because proto packages do not normally start with backwards // domain names. JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. 
This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). + // Controls the name of the wrapper Java class generated for the .proto file. + // That class will always contain the .proto file's getDescriptor() method as + // well as any top-level extensions defined in the .proto file. + // If java_multiple_files is disabled, then all the other classes from the + // .proto file will be nested inside the single wrapper outer class. JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java + // If enabled, then the Java code generator will generate a separate .java // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be + // file. Thus, these types will *not* be nested inside the wrapper class + // named by java_outer_classname. However, the wrapper class will still be // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` // This option does nothing. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 @@ -1531,7 +1751,7 @@ func (x *FileOptions) GetJavaMultipleFiles() bool { return Default_FileOptions_JavaMultipleFiles } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { if x != nil && x.JavaGenerateEqualsAndHash != nil { return *x.JavaGenerateEqualsAndHash @@ -1670,10 +1890,12 @@ type MessageOptions struct { // efficient, has fewer features, and is more complicated. // // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } + // + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // // Note that the message cannot have any defined fields; MessageSets only // have extensions. // @@ -1692,28 +1914,44 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + // // Whether the message is an automatically generated map entry type for the // maps field. 
// // For maps fields: - // map map_field = 1; + // + // map map_field = 1; + // // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; + // + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; // // Implementations may choose not to generate the map_entry=true message, but // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // Enable the legacy handling of JSON field name conflicts. This lowercases + // and strips underscored from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // + // This should only be used as a temporary measure against broken builds due + // to the change in behavior for JSON field name conflicts. + // + // TODO(b/261750190) This is legacy behavior we plan to remove once downstream + // teams have had time to migrate. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -1785,6 +2023,14 @@ func (x *MessageOptions) GetMapEntry() bool { return false } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { + if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { + return *x.DeprecatedLegacyJsonFieldConflicts + } + return false +} + func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -1838,7 +2084,6 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // // Note that implementations may choose not to check required fields within // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. @@ -1849,7 +2094,14 @@ type FieldOptions struct { // implementation must either *always* check its required fields, or *never* // check its required fields, regardless of whether or not the message has // been parsed. + // + // As of May 2022, lazy verifies the contents of the byte stream during + // parsing. An invalid byte stream will cause the overall parsing to fail. Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // unverified_lazy does no correctness checks on the byte stream. This should + // only be used where lazy with verification is prohibitive for performance + // reasons. 
+ UnverifiedLazy *bool `protobuf:"varint,15,opt,name=unverified_lazy,json=unverifiedLazy,def=0" json:"unverified_lazy,omitempty"` // Is this field deprecated? // Depending on the target platform, this can emit Deprecated annotations // for accessors, or it will be completely ignored; in the very least, this @@ -1857,17 +2109,24 @@ type FieldOptions struct { Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // For Google-internal migration only. Do not use. Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // Indicate that the field value should not be printed out when using debug + // formats, e.g. when the field contains sensitive credentials. + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for FieldOptions fields. const ( - Default_FieldOptions_Ctype = FieldOptions_STRING - Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL - Default_FieldOptions_Lazy = bool(false) - Default_FieldOptions_Deprecated = bool(false) - Default_FieldOptions_Weak = bool(false) + Default_FieldOptions_Ctype = FieldOptions_STRING + Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL + Default_FieldOptions_Lazy = bool(false) + Default_FieldOptions_UnverifiedLazy = bool(false) + Default_FieldOptions_Deprecated = bool(false) + Default_FieldOptions_Weak = bool(false) + Default_FieldOptions_DebugRedact = bool(false) ) func (x *FieldOptions) Reset() { @@ -1930,6 +2189,13 @@ func (x *FieldOptions) GetLazy() bool { return Default_FieldOptions_Lazy } +func (x *FieldOptions) GetUnverifiedLazy() bool { + if x != nil && x.UnverifiedLazy != nil { + return *x.UnverifiedLazy + } + return Default_FieldOptions_UnverifiedLazy +} + func (x *FieldOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -1944,6 +2210,27 @@ func (x *FieldOptions) GetWeak() bool { return Default_FieldOptions_Weak } +func (x *FieldOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_FieldOptions_DebugRedact +} + +func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { + if x != nil && x.Retention != nil { + return *x.Retention + } + return FieldOptions_RETENTION_UNKNOWN +} + +func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { + if x != nil && x.Target != nil { + return *x.Target + } + return FieldOptions_TARGET_TYPE_UNKNOWN +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2014,6 +2301,15 @@ type EnumOptions struct { // for the enum, or it will be completely ignored; in the very least, this // is a formalization for deprecating enums. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enable the legacy handling of JSON field name conflicts. 
This lowercases + // and strips underscores from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // TODO(b/261750190) Remove this legacy behavior once downstream teams have + // had time to migrate. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2069,6 +2365,14 @@ func (x *EnumOptions) GetDeprecated() bool { return Default_EnumOptions_Deprecated } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { + if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { + return *x.DeprecatedLegacyJsonFieldConflicts + } + return false +} + func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2399,43 +2703,48 @@ type SourceCodeInfo struct { // tools. // // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } + // + // message Foo { + // optional string foo = 1; + // } + // // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi + // + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). // // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendant. For example, a "group" defines - // both a type and a field in a single declaration. 
Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` } @@ -2715,8 +3024,8 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". +// E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents +// "foo.(bar.baz).moo". type UninterpretedOption_NamePart struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2781,23 +3090,34 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] + // the root FileDescriptorProto to the place where the definition occurs. + // For example, this path: + // + // [ 4, 3, 2, 7, 1 ] + // // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 + // + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; + // + // repeated DescriptorProto message_type = 4; + // // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; + // + // repeated FieldDescriptorProto field = 2; + // // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; + // + // optional string name = 1; // // Thus, the above path gives the location of a field name. 
If we removed // the last element: - // [ 4, 3, 2, 7 ] + // + // [ 4, 3, 2, 7 ] + // // this path refers to the whole field declaration (from the beginning // of the label to the terminating semicolon). Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` @@ -2826,34 +3146,34 @@ type SourceCodeInfo_Location struct { // // Examples: // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; + // // Comment attached to moo. + // // + // // Another line attached to moo. + // optional double moo = 4; // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. + // // Detached comment for corge. This is not leading or trailing comments + // // to moo or corge because there are blank lines separating it from + // // both. // - // // Detached comment for corge paragraph 2. + // // Detached comment for corge paragraph 2. // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; // - // // ignored detached comments. + // // ignored detached comments. LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` @@ -2940,9 +3260,10 @@ type GeneratedCodeInfo_Annotation struct { // that relates to the identified object. Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past + // relates to the identified object. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` } func (x *GeneratedCodeInfo_Annotation) Reset() { @@ -3005,6 +3326,13 @@ func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { return 0 } +func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotation_Semantic { + if x != nil && x.Semantic != nil { + return *x.Semantic + } + return GeneratedCodeInfo_Annotation_NONE +} + var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor var file_google_protobuf_descriptor_proto_rawDesc = []byte{ @@ -3016,7 +3344,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -3054,330 +3382,391 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, - 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, - 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 
0x6e, 0x75, 0x6d, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, + 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, - 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 
0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, - 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, - 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, - 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, - 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, - 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, - 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, - 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, - 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 
0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, - 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, - 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, - 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, - 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, - 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, - 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, - 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 
0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, - 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 
0x67, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, - 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, - 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, - 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, - 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, - 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, - 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, - 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, - 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 
0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, + 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, + 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, + 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, + 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, + 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, + 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, + 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 
0x34, 0x10, 0x04, 0x12, 0x0e, + 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, + 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, + 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, + 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, + 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, + 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, + 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, + 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, + 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, + 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, + 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, + 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, + 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, + 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 
0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, + 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, + 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, + 0x70, 0x75, 0x74, 
0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, + 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, + 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, + 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, + 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, + 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, + 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, + 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, + 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, + 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 
0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, - 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, - 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, - 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, - 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, - 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, - 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, - 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, - 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 
0x54, 0x49, 0x4d, 0x45, 0x10, - 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, - 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, - 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, - 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, - 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, + 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, + 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, + 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, + 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x0f, + 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, + 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, + 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, + 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, + 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, + 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, - 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, + 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, + 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, + 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, 0x03, 0x0a, + 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, + 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x57, 
0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, + 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, + 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xb7, 0x08, 0x0a, 0x0c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, + 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, - 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, - 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 
0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, - 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, - 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, - 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, - 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, - 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, - 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, - 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, - 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, - 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, - 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 
0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x12, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, - 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, - 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, + 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 
0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, + 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, + 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 
0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, @@ -3385,97 +3774,95 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 
0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, + 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, + 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, + 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, + 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, + 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, + 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, - 0x69, 0x64, 
0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, - 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, - 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, - 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, - 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, - 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, - 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, - 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 
0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, - 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, - 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, - 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, - 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01, - 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, - 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, - 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, - 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, + 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, + 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, + 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 
0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, + 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, + 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, + 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, + 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, + 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, + 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, + 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, @@ -3498,7 +3885,7 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 
9) var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type @@ -3506,84 +3893,90 @@ var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType - (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel - (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 10: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 16: google.protobuf.FileOptions - (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 20: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange - (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation + (FieldOptions_OptionRetention)(0), // 5: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 6: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 7: google.protobuf.MethodOptions.IdempotencyLevel + (GeneratedCodeInfo_Annotation_Semantic)(0), // 8: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 9: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 10: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 11: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 12: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 13: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 14: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 15: google.protobuf.EnumDescriptorProto + 
(*EnumValueDescriptorProto)(nil), // 16: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 17: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 18: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 19: google.protobuf.FileOptions + (*MessageOptions)(nil), // 20: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 21: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 22: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 23: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 24: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 25: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 26: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 27: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 28: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 29: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 30: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 31: google.protobuf.DescriptorProto.ReservedRange + (*EnumDescriptorProto_EnumReservedRange)(nil), // 32: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 33: google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 34: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 35: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 10, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 10, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 11, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 15, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> 
google.protobuf.EnumDescriptorProto + 17, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 13, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 19, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 28, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 13, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 13, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 11, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 15, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 30, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 14, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 20, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 31, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 27, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 21, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 15, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 21, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 22, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 16, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 23, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 32, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 24, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 18, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 25, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 26, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> 
google.protobuf.MethodOptions 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 24, // 32: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 43, // [43:43] is the sub-list for method output_type - 43, // [43:43] is the sub-list for method input_type - 43, // [43:43] is the sub-list for extension type_name - 43, // [43:43] is the sub-list for extension extendee - 0, // [0:43] is the sub-list for field type_name + 5, // 32: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 6, // 33: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType + 27, // 34: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 35: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 36: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 37: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 38: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 7, // 39: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 27, // 40: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 33, // 41: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 34, // 42: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 35, // 43: 
google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 12, // 44: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 8, // 45: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 46, // [46:46] is the sub-list for method output_type + 46, // [46:46] is the sub-list for method input_type + 46, // [46:46] is the sub-list for extension type_name + 46, // [46:46] is the sub-list for extension extendee + 0, // [0:46] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -3940,7 +4333,7 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 6, + NumEnums: 9, NumMessages: 27, NumExtensions: 0, NumServices: 0, diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 8c10797b9..a6c7a33f3 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -37,8 +37,7 @@ // It is functionally a tuple of the full name of the remote message type and // the serialized bytes of the remote message value. // -// -// Constructing an Any +// # Constructing an Any // // An Any message containing another message value is constructed using New: // @@ -48,8 +47,7 @@ // } // ... // make use of any // -// -// Unmarshaling an Any +// # Unmarshaling an Any // // With a populated Any message, the underlying message can be serialized into // a remote concrete message value in a few ways. @@ -95,8 +93,7 @@ // listed in the case clauses are linked into the Go binary and therefore also // registered in the global registry. // -// -// Type checking an Any +// # Type checking an Any // // In order to type check whether an Any message represents some other message, // then use the MessageIs method: @@ -115,7 +112,6 @@ // } // ... // make use of m // } -// package anypb import ( @@ -136,45 +132,49 @@ import ( // // Example 1: Pack and unpack a message in C++. // -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } // // Example 2: Pack and unpack a message in Java. // -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := anypb.New(foo) -// if err != nil { -// ... -// } -// ... -// foo := &pb.Foo{} -// if err := any.UnmarshalTo(foo); err != nil { -// ... -// } +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// // or ... +// if (any.isSameTypeAs(Foo.getDefaultInstance())) { +// foo = any.unpack(Foo.getDefaultInstance()); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... 
+// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... +// } // // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack @@ -182,35 +182,33 @@ import ( // in the type URL, for example "foo.bar.com/x/y.z" will yield type // name "y.z". // +// # JSON // -// JSON -// ==== // The JSON representation of an `Any` value uses the regular // representation of the deserialized, embedded message, with an // additional field `@type` which contains the type URL. Example: // -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } // -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } // // If the embedded message type is well-known and has a custom JSON // representation, that representation will be embedded adding a field // `value` which holds the custom JSON in addition to the `@type` // field. Example (for message [google.protobuf.Duration][]): // -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } type Any struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -228,14 +226,14 @@ type Any struct { // scheme `http`, `https`, or no scheme, one can optionally set up a type // server that maps type URLs to message definitions as follows: // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) + // - If no scheme is provided, `https` is assumed. + // - An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // - Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) // // Note: this functionality is not currently available in the official // protobuf release, and it is not used for type URLs beginning with @@ -243,7 +241,6 @@ type Any struct { // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. - // TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. 
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index a583ca2f6..df709a8dd 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -35,8 +35,7 @@ // // The Duration message represents a signed span of time. // -// -// Conversion to a Go Duration +// # Conversion to a Go Duration // // The AsDuration method can be used to convert a Duration message to a // standard Go time.Duration value: @@ -65,15 +64,13 @@ // the resulting value to the closest representable value (e.g., math.MaxInt64 // for positive overflow and math.MinInt64 for negative overflow). // -// -// Conversion from a Go Duration +// # Conversion from a Go Duration // // The durationpb.New function can be used to construct a Duration message // from a standard Go time.Duration value: // // dur := durationpb.New(d) // ... // make use of d as a *durationpb.Duration -// package durationpb import ( @@ -96,43 +93,43 @@ import ( // // Example 1: Compute Duration from two Timestamps in pseudo code. // -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; // -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; // -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (duration.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } // // Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. // -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; // -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; // -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } // // Example 3: Compute Duration from datetime.timedelta in Python. // -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) // // # JSON Mapping // @@ -143,8 +140,6 @@ import ( // encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should // be expressed in JSON format as "3.000000001s", and 3 seconds and 1 // microsecond should be expressed in JSON format as "3.000001s". 
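The anypb hunk above only reflows the package documentation into gofmt's `# Heading` comment style; the API is unchanged. As a reviewer's aid, here is a minimal, self-contained sketch of the pack/unpack round trip those docs describe, using a *durationpb.Duration as the payload so it compiles on its own (any concrete proto.Message works the same way; the printed values are illustrative, not vendored code):

    package main

    import (
        "fmt"
        "log"
        "time"

        "google.golang.org/protobuf/types/known/anypb"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // Pack: anypb.New records the payload's type URL plus its serialized bytes.
        wrapped, err := anypb.New(durationpb.New(3 * time.Second))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(wrapped.TypeUrl) // type.googleapis.com/google.protobuf.Duration

        // Type-check first, then unpack into a concrete message.
        if wrapped.MessageIs(&durationpb.Duration{}) {
            got := &durationpb.Duration{}
            if err := wrapped.UnmarshalTo(got); err != nil {
                log.Fatal(err)
            }
            fmt.Println(got.AsDuration()) // 3s
        }
    }

When the concrete type is not known up front, the UnmarshalNew path mentioned in the docs resolves it from the global registry instead; the MessageIs/UnmarshalTo pair shown here is the cheaper option when it is.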
-// -// type Duration struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index c9ae92132..61f69fc11 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -36,8 +36,7 @@ // The Timestamp message represents a timestamp, // an instant in time since the Unix epoch (January 1st, 1970). // -// -// Conversion to a Go Time +// # Conversion to a Go Time // // The AsTime method can be used to convert a Timestamp message to a // standard Go time.Time value in UTC: @@ -59,8 +58,7 @@ // ... // handle error // } // -// -// Conversion from a Go Time +// # Conversion from a Go Time // // The timestamppb.New function can be used to construct a Timestamp message // from a standard Go time.Time value: @@ -72,7 +70,6 @@ // // ts := timestamppb.Now() // ... // make use of ts as a *timestamppb.Timestamp -// package timestamppb import ( @@ -101,52 +98,50 @@ import ( // // Example 1: Compute Timestamp from POSIX `time()`. // -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); // // Example 2: Compute Timestamp from POSIX `gettimeofday()`. // -// struct timeval tv; -// gettimeofday(&tv, NULL); +// struct timeval tv; +// gettimeofday(&tv, NULL); // -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); // // Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. // -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; // -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); // // Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. // -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// long millis = System.currentTimeMillis(); // +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); // // Example 5: Compute Timestamp from Java `Instant.now()`. 
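Likewise for durationpb: the hunk reformats the examples and the JSON-mapping note without touching behavior. A short sketch of the Go conversions the comments document (durationpb.New, CheckValid, AsDuration); the literals are assumptions chosen to line up with the "3.000001s" JSON example above:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // Go time.Duration -> protobuf Duration.
        d := durationpb.New(3*time.Second + time.Microsecond)
        fmt.Println(d.Seconds, d.Nanos) // 3 1000; the JSON form would be "3.000001s"

        // protobuf Duration -> Go time.Duration, after a range check.
        if err := d.CheckValid(); err != nil {
            fmt.Println("out of range:", err)
            return
        }
        fmt.Println(d.AsDuration()) // 3.000001s
    }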
// -// Instant now = Instant.now(); -// -// Timestamp timestamp = -// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) -// .setNanos(now.getNano()).build(); +// Instant now = Instant.now(); // +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); // // Example 6: Compute Timestamp from current time in Python. // -// timestamp = Timestamp() -// timestamp.GetCurrentTime() +// timestamp = Timestamp() +// timestamp.GetCurrentTime() // // # JSON Mapping // @@ -174,8 +169,6 @@ import ( // the Joda Time's [`ISODateTimeFormat.dateTime()`]( // http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D // ) to obtain a formatter capable of generating timestamps in this format. -// -// type Timestamp struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/modules.txt b/vendor/modules.txt index d75ce93b0..432f9d661 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -43,7 +43,7 @@ github.com/VividCortex/ewma # github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d ## explicit github.com/acarl005/stripansi -# github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d +# github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator # github.com/chzyer/readline v1.5.1 @@ -81,8 +81,8 @@ github.com/containernetworking/cni/pkg/version # github.com/containernetworking/plugins v1.2.0 ## explicit; go 1.17 github.com/containernetworking/plugins/pkg/ns -# github.com/containers/image/v5 v5.24.2 -## explicit; go 1.17 +# github.com/containers/image/v5 v5.25.0 +## explicit; go 1.18 github.com/containers/image/v5/copy github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath @@ -107,6 +107,7 @@ github.com/containers/image/v5/internal/pkg/platform github.com/containers/image/v5/internal/private github.com/containers/image/v5/internal/putblobdigest github.com/containers/image/v5/internal/rootless +github.com/containers/image/v5/internal/set github.com/containers/image/v5/internal/signature github.com/containers/image/v5/internal/signer github.com/containers/image/v5/internal/streamdigest @@ -300,19 +301,19 @@ github.com/go-openapi/jsonreference/internal # github.com/go-openapi/loads v0.21.2 ## explicit; go 1.13 github.com/go-openapi/loads -# github.com/go-openapi/runtime v0.24.1 -## explicit; go 1.15 +# github.com/go-openapi/runtime v0.25.0 +## explicit; go 1.18 github.com/go-openapi/runtime -# github.com/go-openapi/spec v0.20.7 +# github.com/go-openapi/spec v0.20.8 ## explicit; go 1.13 github.com/go-openapi/spec -# github.com/go-openapi/strfmt v0.21.5 -## explicit; go 1.13 +# github.com/go-openapi/strfmt v0.21.7 +## explicit; go 1.19 github.com/go-openapi/strfmt # github.com/go-openapi/swag v0.22.3 ## explicit; go 1.18 github.com/go-openapi/swag -# github.com/go-openapi/validate v0.22.0 +# github.com/go-openapi/validate v0.22.1 ## explicit; go 1.14 github.com/go-openapi/validate # github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 @@ -528,14 +529,16 @@ github.com/proglottis/gpgme # github.com/rivo/uniseg v0.4.4 ## explicit; go 1.18 github.com/rivo/uniseg +# github.com/rogpeppe/go-internal v1.10.0 +## explicit; go 1.19 # github.com/seccomp/libseccomp-golang v0.10.0 ## explicit; go 1.14 github.com/seccomp/libseccomp-golang -# github.com/sigstore/fulcio v1.1.0 +# github.com/sigstore/fulcio v1.2.0 ## explicit; go 1.20 
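The timestamppb hunk above is the same doc-comment reflow once more. For completeness, a minimal sketch of the documented Go round trip (timestamppb.New, CheckValid, AsTime, Now); the date is arbitrary and only serves the example:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
        // Go time.Time -> protobuf Timestamp (seconds/nanos since the Unix epoch).
        ts := timestamppb.New(time.Date(2023, time.April, 5, 7, 11, 27, 0, time.UTC))

        // protobuf Timestamp -> Go time.Time; AsTime always returns UTC.
        if err := ts.CheckValid(); err == nil {
            fmt.Println(ts.AsTime().Format(time.RFC3339)) // 2023-04-05T07:11:27Z
        }

        // timestamppb.Now() captures the current instant.
        fmt.Println(timestamppb.Now().AsTime())
    }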
github.com/sigstore/fulcio/pkg/certificate -# github.com/sigstore/rekor v1.0.1 -## explicit; go 1.18 +# github.com/sigstore/rekor v1.1.0 +## explicit; go 1.19 github.com/sigstore/rekor/pkg/generated/models # github.com/sigstore/sigstore v1.6.0 ## explicit; go 1.18 @@ -585,12 +588,12 @@ github.com/ulikunitz/xz/lzma github.com/vbatts/tar-split/archive/tar github.com/vbatts/tar-split/tar/asm github.com/vbatts/tar-split/tar/storage -# github.com/vbauerster/mpb/v7 v7.5.3 -## explicit; go 1.14 -github.com/vbauerster/mpb/v7 -github.com/vbauerster/mpb/v7/cwriter -github.com/vbauerster/mpb/v7/decor -github.com/vbauerster/mpb/v7/internal +# github.com/vbauerster/mpb/v8 v8.3.0 +## explicit; go 1.17 +github.com/vbauerster/mpb/v8 +github.com/vbauerster/mpb/v8/cwriter +github.com/vbauerster/mpb/v8/decor +github.com/vbauerster/mpb/v8/internal # github.com/vishvananda/netlink v1.2.1-beta.2 ## explicit; go 1.12 github.com/vishvananda/netlink @@ -601,7 +604,7 @@ github.com/vishvananda/netns # go.etcd.io/bbolt v1.3.7 ## explicit; go 1.17 go.etcd.io/bbolt -# go.mongodb.org/mongo-driver v1.11.1 +# go.mongodb.org/mongo-driver v1.11.3 ## explicit; go 1.13 go.mongodb.org/mongo-driver/bson go.mongodb.org/mongo-driver/bson/bsoncodec @@ -646,6 +649,11 @@ golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts +# golang.org/x/exp v0.0.0-20230321023759-10a507213a29 +## explicit; go 1.18 +golang.org/x/exp/constraints +golang.org/x/exp/maps +golang.org/x/exp/slices # golang.org/x/mod v0.9.0 ## explicit; go 1.17 golang.org/x/mod/semver @@ -722,7 +730,7 @@ golang.org/x/tools/internal/typesinternal # google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.53.0 +# google.golang.org/grpc v1.54.0 ## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes @@ -772,7 +780,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.28.1 +# google.golang.org/protobuf v1.30.0 ## explicit; go 1.11 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext
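Note that vendor/modules.txt now pulls in golang.org/x/exp/constraints, golang.org/x/exp/maps, and golang.org/x/exp/slices as transitive dependencies of the updated modules. These are generic helper packages; as a hedged illustration of the kind of code they enable (the map contents below are invented for the example and say nothing about how containers/image actually uses them):

    package main

    import (
        "fmt"

        "golang.org/x/exp/maps"
        "golang.org/x/exp/slices"
    )

    func main() {
        // Hypothetical data, not taken from the real build.
        versions := map[string]string{
            "google.golang.org/grpc":     "v1.54.0",
            "google.golang.org/protobuf": "v1.30.0",
        }

        // maps.Keys returns the keys in unspecified order;
        // slices.Sort makes the iteration deterministic.
        mods := maps.Keys(versions)
        slices.Sort(mods)
        for _, m := range mods {
            fmt.Println(m, versions[m])
        }
    }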