From 60f73b6cf1a8868efe1123ba00e56b83e21e2e42 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Tue, 8 Jan 2019 17:38:54 -0800 Subject: [PATCH 01/77] Support simultaneous image unpack. Signed-off-by: Lantao Liu --- client.go | 1 + pull.go | 34 +++++++- unpacker.go | 238 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 270 insertions(+), 3 deletions(-) create mode 100644 unpacker.go diff --git a/client.go b/client.go index 8ea7d79729ff..aa626abcb8a2 100644 --- a/client.go +++ b/client.go @@ -294,6 +294,7 @@ type RemoteContext struct { PlatformMatcher platforms.MatchComparer // Unpack is done after an image is pulled to extract into a snapshotter. + // It is done simultaneously for schema 2 images when they are pulled. // If an image is not unpacked on pull, it can be unpacked any time // afterwards. Unpacking is required to run an image. Unpack bool diff --git a/pull.go b/pull.go index 3a91daba4472..ef0d147baa1e 100644 --- a/pull.go +++ b/pull.go @@ -32,7 +32,7 @@ import ( // Pull downloads the provided content into containerd's content store // and returns a platform specific image object -func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image, error) { +func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Image, retErr error) { pullCtx := defaultRemoteContext() for _, o := range opts { if err := o(c, pullCtx); err != nil { @@ -61,6 +61,30 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image } defer done(ctx) + var unpacks int32 + if pullCtx.Unpack { + // unpacker only supports schema 2 image, for schema 1 this is noop. + u, err := c.newUnpacker(ctx, pullCtx) + if err != nil { + return nil, errors.Wrap(err, "create unpacker") + } + unpackWrapper, eg := u.handlerWrapper(ctx, &unpacks) + defer func() { + if err := eg.Wait(); err != nil { + if retErr == nil { + retErr = errors.Wrap(err, "unpack") + } + } + }() + wrapper := pullCtx.HandlerWrapper + pullCtx.HandlerWrapper = func(h images.Handler) images.Handler { + if wrapper == nil { + return unpackWrapper(h) + } + return wrapper(unpackWrapper(h)) + } + } + img, err := c.fetch(ctx, pullCtx, ref, 1) if err != nil { return nil, err @@ -69,8 +93,12 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image i := NewImageWithPlatform(c, img, pullCtx.PlatformMatcher) if pullCtx.Unpack { - if err := i.Unpack(ctx, pullCtx.Snapshotter, pullCtx.UnpackOpts...); err != nil { - return nil, errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter) + if unpacks == 0 { + // Try to unpack is none is done previously. + // This is at least required for schema 1 image. + if err := i.Unpack(ctx, pullCtx.Snapshotter, pullCtx.UnpackOpts...); err != nil { + return nil, errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter) + } } } diff --git a/unpacker.go b/unpacker.go new file mode 100644 index 000000000000..89c395cbbc31 --- /dev/null +++ b/unpacker.go @@ -0,0 +1,238 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package containerd + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "sync/atomic" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/rootfs" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" +) + +type layerState struct { + layer rootfs.Layer + downloaded bool + unpacked bool +} + +type unpacker struct { + updateCh chan ocispec.Descriptor + snapshotter string + config UnpackConfig + c *Client +} + +func (c *Client) newUnpacker(ctx context.Context, rCtx *RemoteContext) (*unpacker, error) { + snapshotter, err := c.resolveSnapshotterName(ctx, rCtx.Snapshotter) + if err != nil { + return nil, err + } + var config UnpackConfig + for _, o := range rCtx.UnpackOpts { + if err := o(ctx, &config); err != nil { + return nil, err + } + } + return &unpacker{ + updateCh: make(chan ocispec.Descriptor, 128), + snapshotter: snapshotter, + config: config, + c: c, + }, nil +} + +func (u *unpacker) unpack(ctx context.Context, config ocispec.Descriptor, layers []ocispec.Descriptor) error { + p, err := content.ReadBlob(ctx, u.c.ContentStore(), config) + if err != nil { + return err + } + + var i ocispec.Image + if err := json.Unmarshal(p, &i); err != nil { + return errors.Wrap(err, "unmarshal image config") + } + diffIDs := i.RootFS.DiffIDs + if len(layers) != len(diffIDs) { + return errors.Errorf("number of layers and diffIDs don't match: %d != %d", len(layers), len(diffIDs)) + } + + var ( + sn = u.c.SnapshotService(u.snapshotter) + a = u.c.DiffService() + cs = u.c.ContentStore() + + states []layerState + chain []digest.Digest + ) + for i, desc := range layers { + states = append(states, layerState{ + layer: rootfs.Layer{ + Blob: desc, + Diff: ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayer, + Digest: diffIDs[i], + }, + }, + }) + } + for { + var layer ocispec.Descriptor + select { + case layer = <-u.updateCh: + case <-ctx.Done(): + return ctx.Err() + } + log.G(ctx).WithField("desc", layer).Debug("layer downloaded") + for i := range states { + if states[i].layer.Blob.Digest != layer.Digest { + continue + } + states[i].downloaded = true + break + } + for i := range states { + if !states[i].downloaded { + break + } + if states[i].unpacked { + continue + } + + log.G(ctx).WithFields(logrus.Fields{ + "desc": states[i].layer.Blob, + "diff": states[i].layer.Diff, + }).Debug("unpack layer") + + unpacked, err := rootfs.ApplyLayerWithOpts(ctx, states[i].layer, chain, sn, a, + u.config.SnapshotOpts, u.config.ApplyOpts) + if err != nil { + return err + } + + if unpacked { + // Set the uncompressed label after the uncompressed + // digest has been verified through apply. 
+ cinfo := content.Info{ + Digest: states[i].layer.Blob.Digest, + Labels: map[string]string{ + "containerd.io/uncompressed": states[i].layer.Diff.Digest.String(), + }, + } + if _, err := cs.Update(ctx, cinfo, "labels.containerd.io/uncompressed"); err != nil { + return err + } + } + + chain = append(chain, states[i].layer.Diff.Digest) + states[i].unpacked = true + log.G(ctx).WithFields(logrus.Fields{ + "desc": states[i].layer.Blob, + "diff": states[i].layer.Diff, + }).Debug("layer unpacked") + } + // Check whether all layers are unpacked. + if states[len(states)-1].unpacked { + break + } + } + + chainID := identity.ChainID(chain).String() + cinfo := content.Info{ + Digest: config.Digest, + Labels: map[string]string{ + fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", u.snapshotter): chainID, + }, + } + _, err = cs.Update(ctx, cinfo, fmt.Sprintf("labels.containerd.io/gc.ref.snapshot.%s", u.snapshotter)) + if err != nil { + return err + } + log.G(ctx).WithFields(logrus.Fields{ + "config": config.Digest, + "chainID": chainID, + }).Debug("image unpacked") + return nil +} + +func (u *unpacker) handlerWrapper(uctx context.Context, unpacks *int32) (func(images.Handler) images.Handler, *errgroup.Group) { + eg, uctx := errgroup.WithContext(uctx) + return func(f images.Handler) images.Handler { + var ( + lock sync.Mutex + layers []ocispec.Descriptor + schema1 bool + ) + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f.Handle(ctx, desc) + if err != nil { + return children, err + } + + // `Pull` only supports one platform, so there is only + // one manifest to handle, and manifest list can be + // safely skipped. + // TODO: support multi-platform unpack. + switch desc.MediaType { + case images.MediaTypeDockerSchema1Manifest: + lock.Lock() + schema1 = true + lock.Unlock() + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + lock.Lock() + for _, child := range children { + if child.MediaType == images.MediaTypeDockerSchema2Config || + child.MediaType == ocispec.MediaTypeImageConfig { + continue + } + layers = append(layers, child) + } + lock.Unlock() + case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + lock.Lock() + l := append([]ocispec.Descriptor{}, layers...) + lock.Unlock() + if len(l) > 0 { + atomic.AddInt32(unpacks, 1) + eg.Go(func() error { + return u.unpack(uctx, desc, l) + }) + } + case images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2Layer, + ocispec.MediaTypeImageLayerGzip, ocispec.MediaTypeImageLayer: + lock.Lock() + update := !schema1 + lock.Unlock() + if update { + u.updateCh <- desc + } + } + return children, nil + }) + }, eg +} From 03aafaa1cff235050f116bc228e63ea783f0b4ba Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Tue, 8 Jan 2019 17:44:08 -0800 Subject: [PATCH 02/77] Update the integration test. 
Signed-off-by: Lantao Liu --- client_unix_test.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/client_unix_test.go b/client_unix_test.go index 9711d1c3af3d..fdd6b5335ec6 100644 --- a/client_unix_test.go +++ b/client_unix_test.go @@ -20,6 +20,9 @@ package containerd import ( "runtime" + "testing" + + "github.com/containerd/containerd/platforms" ) const ( @@ -48,3 +51,20 @@ func init() { testImage = "docker.io/library/alpine:latest" } } + +func TestImagePullSchema1WithEmptyLayers(t *testing.T) { + client, err := newClient(t, address) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + ctx, cancel := testContext(t) + defer cancel() + + schema1TestImageWithEmptyLayers := "gcr.io/google-containers/busybox@sha256:d8d3bc2c183ed2f9f10e7258f84971202325ee6011ba137112e01e30f206de67" + _, err = client.Pull(ctx, schema1TestImageWithEmptyLayers, WithPlatform(platforms.DefaultString()), WithSchema1Conversion, WithPullUnpack) + if err != nil { + t.Fatal(err) + } +} From 6e2228df72e205120ee36733de7e38f7cf1e9eb8 Mon Sep 17 00:00:00 2001 From: Maksym Pavlenko Date: Thu, 22 Aug 2019 15:41:50 -0700 Subject: [PATCH 03/77] Vendor github.com/imdario/mergo Signed-off-by: Maksym Pavlenko --- vendor.conf | 1 + vendor/github.com/imdario/mergo/LICENSE | 28 +++ vendor/github.com/imdario/mergo/README.md | 238 ++++++++++++++++++++ vendor/github.com/imdario/mergo/doc.go | 44 ++++ vendor/github.com/imdario/mergo/map.go | 175 +++++++++++++++ vendor/github.com/imdario/mergo/merge.go | 255 ++++++++++++++++++++++ vendor/github.com/imdario/mergo/mergo.go | 97 ++++++++ 7 files changed, 838 insertions(+) create mode 100644 vendor/github.com/imdario/mergo/LICENSE create mode 100644 vendor/github.com/imdario/mergo/README.md create mode 100644 vendor/github.com/imdario/mergo/doc.go create mode 100644 vendor/github.com/imdario/mergo/map.go create mode 100644 vendor/github.com/imdario/mergo/merge.go create mode 100644 vendor/github.com/imdario/mergo/mergo.go diff --git a/vendor.conf b/vendor.conf index 4bcc15bc9c31..a516d91f3e7f 100644 --- a/vendor.conf +++ b/vendor.conf @@ -46,6 +46,7 @@ github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f github.com/hashicorp/golang-lru v0.5.1 go.opencensus.io v0.22.0 +github.com/imdario/mergo v0.3.7 # cri dependencies github.com/containerd/cri f1d492b0cdd14e76476ee4dd024696ce3634e501 # master diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE new file mode 100644 index 000000000000..686680298da2 --- /dev/null +++ b/vendor/github.com/imdario/mergo/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md new file mode 100644 index 000000000000..02fc81e0626e --- /dev/null +++ b/vendor/github.com/imdario/mergo/README.md @@ -0,0 +1,238 @@ +# Mergo + +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. + +## Status + +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). + +[![GoDoc][3]][4] +[![GoCard][5]][6] +[![Build Status][1]][2] +[![Coverage Status][7]][8] +[![Sourcegraph][9]][10] +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield) + +[1]: https://travis-ci.org/imdario/mergo.png +[2]: https://travis-ci.org/imdario/mergo +[3]: https://godoc.org/github.com/imdario/mergo?status.svg +[4]: https://godoc.org/github.com/imdario/mergo +[5]: https://goreportcard.com/badge/imdario/mergo +[6]: https://goreportcard.com/report/github.com/imdario/mergo +[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[8]: https://coveralls.io/github/imdario/mergo?branch=master +[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[10]: https://sourcegraph.com/github.com/imdario/mergo?badge + +### Latest release + +[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7). + +### Important note + +Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. + +If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). + +### Donations + +If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. 
:heart_eyes: + +Buy Me a Coffee at ko-fi.com +[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) +[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) +Donate using Liberapay + +### Mergo in the wild + +- [moby/moby](https://github.com/moby/moby) +- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) +- [vmware/dispatch](https://github.com/vmware/dispatch) +- [Shopify/themekit](https://github.com/Shopify/themekit) +- [imdario/zas](https://github.com/imdario/zas) +- [matcornic/hermes](https://github.com/matcornic/hermes) +- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) +- [kataras/iris](https://github.com/kataras/iris) +- [michaelsauter/crane](https://github.com/michaelsauter/crane) +- [go-task/task](https://github.com/go-task/task) +- [sensu/uchiwa](https://github.com/sensu/uchiwa) +- [ory/hydra](https://github.com/ory/hydra) +- [sisatech/vcli](https://github.com/sisatech/vcli) +- [dairycart/dairycart](https://github.com/dairycart/dairycart) +- [projectcalico/felix](https://github.com/projectcalico/felix) +- [resin-os/balena](https://github.com/resin-os/balena) +- [go-kivik/kivik](https://github.com/go-kivik/kivik) +- [Telefonica/govice](https://github.com/Telefonica/govice) +- [supergiant/supergiant](supergiant/supergiant) +- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) +- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) +- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) +- [EagerIO/Stout](https://github.com/EagerIO/Stout) +- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) +- [russross/canvasassignments](https://github.com/russross/canvasassignments) +- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) +- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) +- [divshot/gitling](https://github.com/divshot/gitling) +- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) +- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) +- [elwinar/rambler](https://github.com/elwinar/rambler) +- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) +- [jfbus/impressionist](https://github.com/jfbus/impressionist) +- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) +- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) +- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) +- [thoas/picfit](https://github.com/thoas/picfit) +- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) +- [jnuthong/item_search](https://github.com/jnuthong/item_search) +- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) + +## Installation + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) + +## Usage + +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + +```go +if err := mergo.Merge(&dst, src); err != nil { + // ... +} +``` + +Also, you can merge overwriting values using the transformer `WithOverride`. 
+ +```go +if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... +} +``` + +Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. + +```go +if err := mergo.Map(&dst, srcMap); err != nil { + // ... +} +``` + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. + +More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). + +### Nice example + +```go +package main + +import ( + "fmt" + "github.com/imdario/mergo" +) + +type Foo struct { + A string + B int64 +} + +func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} +} +``` + +Note: if test are failing due missing package, please execute: + + go get gopkg.in/yaml.v2 + +### Transformers + +Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? + +```go +package main + +import ( + "fmt" + "github.com/imdario/mergo" + "reflect" + "time" +) + +type timeTransfomer struct { +} + +func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil +} + +type Snapshot struct { + Time time.Time + // ... +} + +func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } +} +``` + + +## Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) + +## About + +Written by [Dario Castañé](http://dario.im). 
+ +## Top Contributors + +[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) +[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) +[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) +[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) +[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) +[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) +[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) +[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) + + +## License + +[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). + + +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go new file mode 100644 index 000000000000..6e9aa7baf354 --- /dev/null +++ b/vendor/github.com/imdario/mergo/doc.go @@ -0,0 +1,44 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mergo merges same-type structs and maps by setting default values in zero-value fields. + +Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Usage + +From my own work-in-progress project: + + type networkConfig struct { + Protocol string + Address string + ServerType string `json: "server_type"` + Port uint16 + } + + type FssnConfig struct { + Network networkConfig + } + + var fssnDefault = FssnConfig { + networkConfig { + "tcp", + "127.0.0.1", + "http", + 31560, + }, + } + + // Inside a function [...] + + if err := mergo.Merge(&config, fssnDefault); err != nil { + log.Fatal(err) + } + + // More code [...] + +*/ +package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go new file mode 100644 index 000000000000..3f5afa83a13c --- /dev/null +++ b/vendor/github.com/imdario/mergo/map.go @@ -0,0 +1,175 @@ +// Copyright 2014 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" + "unicode" + "unicode/utf8" +) + +func changeInitialCase(s string, mapper func(rune) rune) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(mapper(r)) + s[n:] +} + +func isExported(field reflect.StructField) bool { + r, _ := utf8.DecodeRuneInString(field.Name) + return r >= 'A' && r <= 'Z' +} + +// Traverses recursively both values, assigning src's fields values to dst. 
+// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + zeroValue := reflect.Value{} + switch dst.Kind() { + case reflect.Map: + dstMap := dst.Interface().(map[string]interface{}) + for i, n := 0, src.NumField(); i < n; i++ { + srcType := src.Type() + field := srcType.Field(i) + if !isExported(field) { + continue + } + fieldName := field.Name + fieldName = changeInitialCase(fieldName, unicode.ToLower) + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + dstMap[fieldName] = src.Field(i).Interface() + } + } + case reflect.Ptr: + if dst.IsNil() { + v := reflect.New(dst.Type().Elem()) + dst.Set(v) + } + dst = dst.Elem() + fallthrough + case reflect.Struct: + srcMap := src.Interface().(map[string]interface{}) + for key := range srcMap { + config.overwriteWithEmptyValue = true + srcValue := srcMap[key] + fieldName := changeInitialCase(key, unicode.ToUpper) + dstElement := dst.FieldByName(fieldName) + if dstElement == zeroValue { + // We discard it because the field doesn't exist. + continue + } + srcElement := reflect.ValueOf(srcValue) + dstKind := dstElement.Kind() + srcKind := srcElement.Kind() + if srcKind == reflect.Ptr && dstKind != reflect.Ptr { + srcElement = srcElement.Elem() + srcKind = reflect.TypeOf(srcElement.Interface()).Kind() + } else if dstKind == reflect.Ptr { + // Can this work? I guess it can't. + if srcKind != reflect.Ptr && srcElement.CanAddr() { + srcPtr := srcElement.Addr() + srcElement = reflect.ValueOf(srcPtr) + srcKind = reflect.Ptr + } + } + + if !srcElement.IsValid() { + continue + } + if srcKind == dstKind { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if srcKind == reflect.Map { + if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else { + return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) + } + } + } + return +} + +// Map sets fields' values in dst from src. +// src can be a map with string keys or a struct. dst must be the opposite: +// if src is a map, dst must be a valid pointer to struct. If src is a struct, +// dst must be map[string]interface{}. +// It won't merge unexported (private) fields and will do recursively +// any exported field. +// If dst is a map, keys will be src fields' names in lower camel case. +// Missing key in src that doesn't match a field in dst will be skipped. This +// doesn't apply if dst is a map. +// This is separated method from Merge because it is cleaner and it keeps sane +// semantics: merging equal types, mapping different (restricted) types. +func Map(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, opts...) +} + +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by +// non-empty src attribute values. 
+// Deprecated: Use Map(…) with WithOverride +func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, append(opts, WithOverride)...) +} + +func _map(dst, src interface{}, opts ...func(*Config)) error { + var ( + vDst, vSrc reflect.Value + err error + ) + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + // To be friction-less, we redirect equal-type arguments + // to deepMerge. Only because arguments can be anything. + if vSrc.Kind() == vDst.Kind() { + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) + } + switch vSrc.Kind() { + case reflect.Struct: + if vDst.Kind() != reflect.Map { + return ErrExpectedMapAsDestination + } + case reflect.Map: + if vDst.Kind() != reflect.Struct { + return ErrExpectedStructAsDestination + } + default: + return ErrNotSupported + } + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go new file mode 100644 index 000000000000..f8de6c54305a --- /dev/null +++ b/vendor/github.com/imdario/mergo/merge.go @@ -0,0 +1,255 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" +) + +func hasExportedField(dst reflect.Value) (exported bool) { + for i, n := 0, dst.NumField(); i < n; i++ { + field := dst.Type().Field(i) + if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { + exported = exported || hasExportedField(dst.Field(i)) + } else { + exported = exported || len(field.PkgPath) == 0 + } + } + return +} + +type Config struct { + Overwrite bool + AppendSlice bool + Transformers Transformers + overwriteWithEmptyValue bool +} + +type Transformers interface { + Transformer(reflect.Type) func(dst, src reflect.Value) error +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + overwriteWithEmptySrc := config.overwriteWithEmptyValue + config.overwriteWithEmptyValue = false + + if !src.IsValid() { + return + } + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
+ visited[h] = &visit{addr, typ, seen} + } + + if config.Transformers != nil && !isEmptyValue(dst) { + if fn := config.Transformers.Transformer(dst.Type()); fn != nil { + err = fn(dst, src) + return + } + } + + switch dst.Kind() { + case reflect.Struct: + if hasExportedField(dst) { + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { + return + } + } + } else { + if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } + case reflect.Map: + if dst.IsNil() && !src.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + continue + } + dstElement := dst.MapIndex(key) + switch srcElement.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: + if srcElement.IsNil() { + continue + } + fallthrough + default: + if !srcElement.CanInterface() { + continue + } + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + fallthrough + case reflect.Map: + srcMapElm := srcElement + dstMapElm := dstElement + if srcMapElm.CanInterface() { + srcMapElm = reflect.ValueOf(srcMapElm.Interface()) + if dstMapElm.IsValid() { + dstMapElm = reflect.ValueOf(dstMapElm.Interface()) + } + } + if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { + return + } + case reflect.Slice: + srcSlice := reflect.ValueOf(srcElement.Interface()) + + var dstSlice reflect.Value + if !dstElement.IsValid() || dstElement.IsNil() { + dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) + } else { + dstSlice = reflect.ValueOf(dstElement.Interface()) + } + + if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + dstSlice = srcSlice + } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } + dst.SetMapIndex(key, dstSlice) + } + } + if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { + continue + } + + if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) { + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + dst.SetMapIndex(key, srcElement) + } + } + case reflect.Slice: + if !dst.CanSet() { + break + } + if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + dst.Set(src) + } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } + dst.Set(reflect.AppendSlice(dst, src)) + } + case reflect.Ptr: + fallthrough + case reflect.Interface: + if src.IsNil() { + break + } + if src.Kind() != reflect.Interface { + if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } else if src.Kind() == reflect.Ptr { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else if dst.Elem().Type() == src.Type() { + if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { + return + } + } 
else { + return ErrDifferentArgumentsTypes + } + break + } + if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + default: + if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } + return +} + +// Merge will fill any empty for value type attributes on the dst struct using corresponding +// src attributes if they themselves are not empty. dst and src must be valid same-type structs +// and dst must be a pointer to struct. +// It won't merge unexported (private) fields and will do recursively any exported field. +func Merge(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, opts...) +} + +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by +// non-empty src attribute values. +// Deprecated: use Merge(…) with WithOverride +func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, append(opts, WithOverride)...) +} + +// WithTransformers adds transformers to merge, allowing to customize the merging of some types. +func WithTransformers(transformers Transformers) func(*Config) { + return func(config *Config) { + config.Transformers = transformers + } +} + +// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. +func WithOverride(config *Config) { + config.Overwrite = true +} + +// WithAppendSlice will make merge append slices instead of overwriting it +func WithAppendSlice(config *Config) { + config.AppendSlice = true +} + +func merge(dst, src interface{}, opts ...func(*Config)) error { + var ( + vDst, vSrc reflect.Value + err error + ) + + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + if vDst.Type() != vSrc.Type() { + return ErrDifferentArgumentsTypes + } + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go new file mode 100644 index 000000000000..a82fea2fdccc --- /dev/null +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -0,0 +1,97 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "errors" + "reflect" +) + +// Errors reported by Mergo when it finds invalid arguments. +var ( + ErrNilArguments = errors.New("src and dst must not be nil") + ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") + ErrNotSupported = errors.New("only structs and maps are supported") + ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") + ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") +) + +// During deepMerge, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited are stored in a map indexed by 17 * a1 + a2; +type visit struct { + ptr uintptr + typ reflect.Type + next *visit +} + +// From src/pkg/encoding/json/encode.go. 
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return true + } + return isEmptyValue(v.Elem()) + case reflect.Func: + return v.IsNil() + case reflect.Invalid: + return true + } + return false +} + +func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { + if dst == nil || src == nil { + err = ErrNilArguments + return + } + vDst = reflect.ValueOf(dst).Elem() + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + err = ErrNotSupported + return + } + vSrc = reflect.ValueOf(src) + // We check if vSrc is a pointer to dereference it. + if vSrc.Kind() == reflect.Ptr { + vSrc = vSrc.Elem() + } + return +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + return // TODO refactor +} From a1e3779cad30e491e6327af760af6f0ba27579c5 Mon Sep 17 00:00:00 2001 From: Maksym Pavlenko Date: Thu, 22 Aug 2019 15:50:58 -0700 Subject: [PATCH 04/77] Support config imports #3289 Signed-off-by: Maksym Pavlenko --- services/server/config/config.go | 107 +++++++++++++++-- services/server/config/config_test.go | 163 ++++++++++++++++++++++++++ 2 files changed, 262 insertions(+), 8 deletions(-) create mode 100644 services/server/config/config_test.go diff --git a/services/server/config/config.go b/services/server/config/config.go index 9f8b1537c836..7c47cd745946 100644 --- a/services/server/config/config.go +++ b/services/server/config/config.go @@ -17,12 +17,15 @@ package config import ( + "path/filepath" "strings" "github.com/BurntSushi/toml" + "github.com/imdario/mergo" + "github.com/pkg/errors" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/plugin" - "github.com/pkg/errors" ) // Config provides containerd configuration data for the server @@ -57,6 +60,8 @@ type Config struct { ProxyPlugins map[string]ProxyPlugin `toml:"proxy_plugins"` // Timeouts specified as a duration Timeouts map[string]string `toml:"timeouts"` + // Imports are additional file path list to config files that can overwrite main config file fields + Imports []string `toml:"imports"` StreamProcessors []StreamProcessor `toml:"stream_processors"` @@ -205,16 +210,102 @@ func (c *Config) Decode(p *plugin.Registration) (interface{}, error) { } // LoadConfig loads the containerd server config from the provided path -func LoadConfig(path string, v *Config) error { - if v == nil { - return errors.Wrapf(errdefs.ErrInvalidArgument, "argument v must not be nil") +func LoadConfig(path string, out *Config) error { + if out == nil { + return errors.Wrapf(errdefs.ErrInvalidArgument, "argument out must not 
be nil") + } + + var ( + loaded = map[string]bool{} + pending = []string{path} + ) + + for len(pending) > 0 { + path, pending = pending[0], pending[1:] + + // Check if a file at the given path already loaded to prevent circular imports + if _, ok := loaded[path]; ok { + continue + } + + config, err := loadConfigFile(path) + if err != nil { + return err + } + + if err := mergeConfig(out, config); err != nil { + return err + } + + imports, err := resolveImports(path, config.Imports) + if err != nil { + return err + } + + loaded[path] = true + pending = append(pending, imports...) } - md, err := toml.DecodeFile(path, v) + + // Fix up the list of config files loaded + out.Imports = []string{} + for path := range loaded { + out.Imports = append(out.Imports, path) + } + + return out.ValidateV2() +} + +// loadConfigFile decodes a TOML file at the given path +func loadConfigFile(path string) (*Config, error) { + config := &Config{} + md, err := toml.DecodeFile(path, &config) if err != nil { - return err + return nil, err } - v.md = md - return v.ValidateV2() + config.md = md + return config, nil +} + +// resolveImports resolves import strings list to absolute paths list: +// - If path contains *, glob pattern matching applied +// - Non abs path is relative to parent config file directory +// - Abs paths returned as is +func resolveImports(parent string, imports []string) ([]string, error) { + var out []string + + for _, path := range imports { + if strings.Contains(path, "*") { + matches, err := filepath.Glob(path) + if err != nil { + return nil, err + } + + out = append(out, matches...) + } else { + path = filepath.Clean(path) + if !filepath.IsAbs(path) { + path = filepath.Join(filepath.Dir(parent), path) + } + + out = append(out, path) + } + } + + return out, nil +} + +// mergeConfig merges Config structs with the following rules: +// 'to' 'from' 'result' overwrite? +// "" "value" "value" yes +// "value" "" "value" no +// 1 0 1 no +// 0 1 1 yes +// []{"1"} []{"2"} []{"2"} yes +// []{"1"} []{} []{"1"} no +func mergeConfig(to, from *Config) error { + return mergo.Merge(to, from, func(config *mergo.Config) { + config.Overwrite = true + }) } // V1DisabledFilter matches based on ID diff --git a/services/server/config/config_test.go b/services/server/config/config_test.go new file mode 100644 index 000000000000..56f9cb030c35 --- /dev/null +++ b/services/server/config/config_test.go @@ -0,0 +1,163 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package config + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "gotest.tools/assert" +) + +func TestMergeConfigs(t *testing.T) { + a := &Config{ + Version: 2, + Root: "old_root", + RequiredPlugins: []string{"old_plugin"}, + DisabledPlugins: []string{"old_plugin"}, + State: "old_state", + OOMScore: 1, + } + + b := &Config{ + Root: "new_root", + RequiredPlugins: []string{"new_plugin1", "new_plugin2"}, + OOMScore: 2, + } + + err := mergeConfig(a, b) + assert.NilError(t, err) + + assert.Equal(t, a.Version, 2) + assert.Equal(t, a.Root, "new_root") + assert.Equal(t, a.State, "old_state") + assert.Equal(t, a.OOMScore, 2) + assert.DeepEqual(t, a.RequiredPlugins, []string{"new_plugin1", "new_plugin2"}) + assert.DeepEqual(t, a.DisabledPlugins, []string{"old_plugin"}) +} + +func TestResolveImports(t *testing.T) { + tempDir, err := ioutil.TempDir("", "containerd_") + assert.NilError(t, err) + defer os.RemoveAll(tempDir) + + for _, filename := range []string{"config_1.toml", "config_2.toml", "test.toml"} { + err = ioutil.WriteFile(filepath.Join(tempDir, filename), []byte(""), 0600) + assert.NilError(t, err) + } + + imports, err := resolveImports(filepath.Join(tempDir, "root.toml"), []string{ + filepath.Join(tempDir, "config_*.toml"), // Glob + filepath.Join(tempDir, "./test.toml"), // Path clean up + "current.toml", // Resolve current working dir + }) + assert.NilError(t, err) + + assert.DeepEqual(t, imports, []string{ + filepath.Join(tempDir, "config_1.toml"), + filepath.Join(tempDir, "config_2.toml"), + filepath.Join(tempDir, "test.toml"), + filepath.Join(tempDir, "current.toml"), + }) +} + +func TestLoadSingleConfig(t *testing.T) { + data := ` +version = 2 +root = "/var/lib/containerd" +` + tempDir, err := ioutil.TempDir("", "containerd_") + assert.NilError(t, err) + defer os.RemoveAll(tempDir) + + path := filepath.Join(tempDir, "config.toml") + err = ioutil.WriteFile(path, []byte(data), 0600) + assert.NilError(t, err) + + var out Config + err = LoadConfig(path, &out) + assert.NilError(t, err) + assert.Equal(t, 2, out.Version) + assert.Equal(t, "/var/lib/containerd", out.Root) +} + +func TestLoadConfigWithImports(t *testing.T) { + data1 := ` +version = 2 +root = "/var/lib/containerd" +imports = ["data2.toml"] +` + + data2 := ` +disabled_plugins = ["io.containerd.v1.xyz"] +` + + tempDir, err := ioutil.TempDir("", "containerd_") + assert.NilError(t, err) + defer os.RemoveAll(tempDir) + + err = ioutil.WriteFile(filepath.Join(tempDir, "data1.toml"), []byte(data1), 0600) + assert.NilError(t, err) + + err = ioutil.WriteFile(filepath.Join(tempDir, "data2.toml"), []byte(data2), 0600) + assert.NilError(t, err) + + var out Config + err = LoadConfig(filepath.Join(tempDir, "data1.toml"), &out) + assert.NilError(t, err) + + assert.Equal(t, 2, out.Version) + assert.Equal(t, "/var/lib/containerd", out.Root) + assert.DeepEqual(t, []string{"io.containerd.v1.xyz"}, out.DisabledPlugins) +} + +func TestLoadConfigWithCircularImports(t *testing.T) { + data1 := ` +version = 2 +root = "/var/lib/containerd" +imports = ["data2.toml", "data1.toml"] +` + + data2 := ` +disabled_plugins = ["io.containerd.v1.xyz"] +imports = ["data1.toml", "data2.toml"] +` + tempDir, err := ioutil.TempDir("", "containerd_") + assert.NilError(t, err) + defer os.RemoveAll(tempDir) + + err = ioutil.WriteFile(filepath.Join(tempDir, "data1.toml"), []byte(data1), 0600) + assert.NilError(t, err) + + err = ioutil.WriteFile(filepath.Join(tempDir, "data2.toml"), []byte(data2), 0600) + assert.NilError(t, err) + + var out Config + 
err = LoadConfig(filepath.Join(tempDir, "data1.toml"), &out) + assert.NilError(t, err) + + assert.Equal(t, 2, out.Version) + assert.Equal(t, "/var/lib/containerd", out.Root) + assert.DeepEqual(t, []string{"io.containerd.v1.xyz"}, out.DisabledPlugins) + + assert.DeepEqual(t, []string{ + filepath.Join(tempDir, "data1.toml"), + filepath.Join(tempDir, "data2.toml"), + }, out.Imports) +} From bca0857530d1ead52d03595b1558bfe98112d187 Mon Sep 17 00:00:00 2001 From: Maksym Pavlenko Date: Thu, 22 Aug 2019 17:22:25 -0700 Subject: [PATCH 05/77] Fix toml plugin decoding Do not rely on toml metadata when decoding plugin's configs as it's not possible to merge toml.MetaData structs during import. Signed-off-by: Maksym Pavlenko --- services/server/config/config.go | 7 ++----- services/server/config/config_test.go | 27 +++++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/services/server/config/config.go b/services/server/config/config.go index 7c47cd745946..57dc715854aa 100644 --- a/services/server/config/config.go +++ b/services/server/config/config.go @@ -64,8 +64,6 @@ type Config struct { Imports []string `toml:"imports"` StreamProcessors []StreamProcessor `toml:"stream_processors"` - - md toml.MetaData } // StreamProcessor provides configuration for diff content processors @@ -203,7 +201,7 @@ func (c *Config) Decode(p *plugin.Registration) (interface{}, error) { if !ok { return p.Config, nil } - if err := c.md.PrimitiveDecode(data, p.Config); err != nil { + if err := toml.PrimitiveDecode(data, p.Config); err != nil { return nil, err } return p.Config, nil @@ -258,11 +256,10 @@ func LoadConfig(path string, out *Config) error { // loadConfigFile decodes a TOML file at the given path func loadConfigFile(path string) (*Config, error) { config := &Config{} - md, err := toml.DecodeFile(path, &config) + _, err := toml.DecodeFile(path, &config) if err != nil { return nil, err } - config.md = md return config, nil } diff --git a/services/server/config/config_test.go b/services/server/config/config_test.go index 56f9cb030c35..8e65a4630709 100644 --- a/services/server/config/config_test.go +++ b/services/server/config/config_test.go @@ -23,6 +23,8 @@ import ( "testing" "gotest.tools/assert" + + "github.com/containerd/containerd/plugin" ) func TestMergeConfigs(t *testing.T) { @@ -161,3 +163,28 @@ imports = ["data1.toml", "data2.toml"] filepath.Join(tempDir, "data2.toml"), }, out.Imports) } + +func TestDecodePlugin(t *testing.T) { + data := ` +version = 1 +[plugins.linux] + shim_debug = true +` + + tempDir, err := ioutil.TempDir("", "containerd_") + assert.NilError(t, err) + defer os.RemoveAll(tempDir) + + path := filepath.Join(tempDir, "config.toml") + err = ioutil.WriteFile(path, []byte(data), 0600) + assert.NilError(t, err) + + var out Config + err = LoadConfig(path, &out) + assert.NilError(t, err) + + pluginConfig := map[string]interface{}{} + _, err = out.Decode(&plugin.Registration{ID: "linux", Config: &pluginConfig}) + assert.NilError(t, err) + assert.Equal(t, true, pluginConfig["shim_debug"]) +} From 4d11bb36ad4d5aad00e150916b8280ff3010e0a9 Mon Sep 17 00:00:00 2001 From: "renzhen.rz" Date: Wed, 21 Aug 2019 22:54:01 +0800 Subject: [PATCH 06/77] devmapper: activate dm device if snap device marked as activated - reproducer 1. stop a container; 2. reboot, or dmsetup remove its corresponding dm device; 3. 
start the container, it will fail like: """ Error: failed to start containers: {"message":"failed to create container(4f33d2760760c41518a84821153ccdf7f80980b797b783cdd75178fc6ca0bf4b) on containerd: failed to create task for container(4f33d2760760c41518a84821153ccdf7f80980b797b783cdd75178fc6ca0bf4b): failed to mount rootfs component &{ext4 /dev/mapper/vg0-mythinpool-snap-2 []}: no such file or directory: unknown"} """ - how the fix works activate the dm device if necessary, and give a warn msg: """ time="2019-08-21T22:44:08.422695797+08:00" level=warning msg="devmapper device \"vg0-mythinpool-snap-2\" marked as \"Activated\" but not active, activating it" """ Signed-off-by: Eric Ren --- snapshots/devmapper/pool_device.go | 38 ++++++++++++++++++------- snapshots/devmapper/pool_device_test.go | 6 ++-- 2 files changed, 32 insertions(+), 12 deletions(-) diff --git a/snapshots/devmapper/pool_device.go b/snapshots/devmapper/pool_device.go index 4f14383d78b6..87bdb310fb94 100644 --- a/snapshots/devmapper/pool_device.go +++ b/snapshots/devmapper/pool_device.go @@ -73,23 +73,41 @@ func NewPoolDevice(ctx context.Context, config *Config) (*PoolDevice, error) { return poolDevice, nil } -// ensureDeviceStates marks devices with incomplete states (after crash) as 'Faulty' +// ensureDeviceStates updates devices to their real state: +// - marks devices with incomplete states (after crash) as 'Faulty' +// - activates devices if they are marked as 'Activated' but the dm +// device is not active, which can happen to a stopped container +// after a reboot func (p *PoolDevice) ensureDeviceStates(ctx context.Context) error { - var devices []*DeviceInfo + var faultyDevices []*DeviceInfo + var activatedDevices []*DeviceInfo if err := p.metadata.WalkDevices(ctx, func(info *DeviceInfo) error { switch info.State { - case Activated, Suspended, Resumed, Deactivated, Removed, Faulty: - return nil + case Suspended, Resumed, Deactivated, Removed, Faulty: + case Activated: + activatedDevices = append(activatedDevices, info) + default: + faultyDevices = append(faultyDevices, info) } - devices = append(devices, info) return nil }); err != nil { return errors.Wrap(err, "failed to query devices from metastore") } var result *multierror.Error - for _, dev := range devices { + for _, dev := range activatedDevices { + if p.IsActivated(dev.Name) { + continue + } + + log.G(ctx).Warnf("devmapper device %q marked as %q but not active, activating it", dev.Name, dev.State) + if err := p.activateDevice(ctx, dev); err != nil { + result = multierror.Append(result, err) + } + } + + for _, dev := range faultyDevices { log.G(ctx). WithField("dev_id", dev.DeviceID). WithField("parent", dev.ParentName). 
@@ -350,7 +368,7 @@ func (p *PoolDevice) DeactivateDevice(ctx context.Context, deviceName string, de return nil } -// IsActivated returns true if thin-device is activated and not suspended +// IsActivated returns true if thin-device is activated func (p *PoolDevice) IsActivated(deviceName string) bool { infos, err := dmsetup.Info(deviceName) if err != nil || len(infos) != 1 { @@ -358,11 +376,11 @@ func (p *PoolDevice) IsActivated(deviceName string) bool { return false } - if devInfo := infos[0]; devInfo.Suspended { - return false + if devInfo := infos[0]; devInfo.TableLive { + return true } - return true + return false } // IsLoaded returns true if thin-device is visible for dmsetup diff --git a/snapshots/devmapper/pool_device_test.go b/snapshots/devmapper/pool_device_test.go index b6d46c00cae5..49eef40892b7 100644 --- a/snapshots/devmapper/pool_device_test.go +++ b/snapshots/devmapper/pool_device_test.go @@ -161,7 +161,9 @@ func TestPoolDeviceMarkFaulty(t *testing.T) { err := store.AddDevice(testCtx, &DeviceInfo{Name: "1", State: Unknown}) assert.NilError(t, err) - err = store.AddDevice(testCtx, &DeviceInfo{Name: "2", State: Activated}) + // Note: do not use 'Activated' here because pool.ensureDeviceStates() will + // try to activate the real dm device, which will fail on a faked device. + err = store.AddDevice(testCtx, &DeviceInfo{Name: "2", State: Deactivated}) assert.NilError(t, err) pool := &PoolDevice{metadata: store} @@ -177,7 +179,7 @@ func TestPoolDeviceMarkFaulty(t *testing.T) { assert.Equal(t, Faulty, info.State) assert.Equal(t, "1", info.Name) case 2: - assert.Equal(t, Activated, info.State) + assert.Equal(t, Deactivated, info.State) assert.Equal(t, "2", info.Name) default: t.Error("unexpected walk call") From b5f03eacd34c7beffe5c8cc2a5cfee852335f681 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 23 Aug 2019 15:15:23 +0000 Subject: [PATCH 07/77] Pin to libseccomp 2.3.3 lib seccomp 2.4 has huge performance regressions. This change pins to 2.3.3 where that is not an issue Signed-off-by: Michael Crosby --- .travis.yml | 2 +- script/setup/install-seccomp | 37 ++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) create mode 100755 script/setup/install-seccomp diff --git a/.travis.yml b/.travis.yml index 8915f818055e..c97348a48b72 100644 --- a/.travis.yml +++ b/.travis.yml @@ -34,7 +34,6 @@ addons: - libprotobuf-c0-dev - libprotobuf-dev - socat - - libseccomp-dev before_install: - uname -r @@ -48,6 +47,7 @@ install: - go get -u github.com/vbatts/git-validation - go get -u github.com/kunalkushwaha/ltag - go get -u github.com/LK4D4/vndr + - if [ "$TRAVIS_GOOS" = "linux" ]; then sudo PATH=$PATH GOPATH=$GOPATH script/setup/install-seccomp ; fi - if [ "$TRAVIS_GOOS" = "linux" ]; then sudo PATH=$PATH GOPATH=$GOPATH script/setup/install-runc ; fi - if [ "$TRAVIS_GOOS" = "linux" ]; then sudo PATH=$PATH GOPATH=$GOPATH script/setup/install-cni ; fi - if [ "$TRAVIS_GOOS" = "linux" ]; then sudo PATH=$PATH GOPATH=$GOPATH script/setup/install-critools ; fi diff --git a/script/setup/install-seccomp b/script/setup/install-seccomp new file mode 100755 index 000000000000..d5569b78a322 --- /dev/null +++ b/script/setup/install-seccomp @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# Builds and installs runc to /usr/local/go/bin based off +# the commit defined in vendor.conf +# +set -eu -o pipefail + +set -x + +export SECCOMP_VERSION="2.3.3" +export SECCOMP_PATH="$(mktemp -d)" +curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" | tar -xzC "$SECCOMP_PATH" --strip-components=1 +( + cd "$SECCOMP_PATH" + ./configure --prefix=/usr/local + make + make install + ldconfig +) + +rm -rf "$SECCOMP_PATH" From 19cd0a4f1212dbb283c6201b06ba06affb61bd60 Mon Sep 17 00:00:00 2001 From: Maksym Pavlenko Date: Fri, 23 Aug 2019 10:12:19 -0700 Subject: [PATCH 08/77] Append slices when importing config files Signed-off-by: Maksym Pavlenko --- services/server/config/config.go | 18 ++++++++---------- services/server/config/config_test.go | 5 ++++- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/services/server/config/config.go b/services/server/config/config.go index 57dc715854aa..e3aacbae8b76 100644 --- a/services/server/config/config.go +++ b/services/server/config/config.go @@ -292,17 +292,15 @@ func resolveImports(parent string, imports []string) ([]string, error) { } // mergeConfig merges Config structs with the following rules: -// 'to' 'from' 'result' overwrite? -// "" "value" "value" yes -// "value" "" "value" no -// 1 0 1 no -// 0 1 1 yes -// []{"1"} []{"2"} []{"2"} yes -// []{"1"} []{} []{"1"} no +// 'to' 'from' 'result' +// "" "value" "value" +// "value" "" "value" +// 1 0 1 +// 0 1 1 +// []{"1"} []{"2"} []{"1","2"} +// []{"1"} []{} []{"1"} func mergeConfig(to, from *Config) error { - return mergo.Merge(to, from, func(config *mergo.Config) { - config.Overwrite = true - }) + return mergo.Merge(to, from, mergo.WithOverride, mergo.WithAppendSlice) } // V1DisabledFilter matches based on ID diff --git a/services/server/config/config_test.go b/services/server/config/config_test.go index 8e65a4630709..2f08488942f4 100644 --- a/services/server/config/config_test.go +++ b/services/server/config/config_test.go @@ -35,12 +35,14 @@ func TestMergeConfigs(t *testing.T) { DisabledPlugins: []string{"old_plugin"}, State: "old_state", OOMScore: 1, + Timeouts: map[string]string{"a": "1"}, } b := &Config{ Root: "new_root", RequiredPlugins: []string{"new_plugin1", "new_plugin2"}, OOMScore: 2, + Timeouts: map[string]string{"b": "2"}, } err := mergeConfig(a, b) @@ -50,8 +52,9 @@ func TestMergeConfigs(t *testing.T) { assert.Equal(t, a.Root, "new_root") assert.Equal(t, a.State, "old_state") assert.Equal(t, a.OOMScore, 2) - assert.DeepEqual(t, a.RequiredPlugins, []string{"new_plugin1", "new_plugin2"}) + assert.DeepEqual(t, a.RequiredPlugins, []string{"old_plugin", "new_plugin1", "new_plugin2"}) assert.DeepEqual(t, a.DisabledPlugins, []string{"old_plugin"}) + assert.DeepEqual(t, a.Timeouts, map[string]string{"a": "1", "b": "2"}) } func TestResolveImports(t *testing.T) { From ea6c749e3536d0bb745a4902954d0caf115af4ba Mon Sep 17 00:00:00 2001 From: Maksym Pavlenko Date: Fri, 23 Aug 2019 11:08:25 -0700 Subject: [PATCH 09/77] Update config doc Signed-off-by: Maksym Pavlenko --- 
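The merge table documented above follows directly from the mergo options now used in mergeConfig. A minimal, self-contained sketch of the same semantics, assuming the github.com/imdario/mergo module that containerd vendors (the struct below is an illustrative stand-in, not the real server Config):

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type config struct {
	Root            string
	OOMScore        int
	RequiredPlugins []string
}

func main() {
	to := config{Root: "old_root", OOMScore: 1, RequiredPlugins: []string{"old_plugin"}}
	from := config{Root: "new_root", OOMScore: 2, RequiredPlugins: []string{"new_plugin1", "new_plugin2"}}

	// WithOverride lets non-zero values in `from` win over `to`;
	// WithAppendSlice appends slice elements instead of replacing the slice.
	if err := mergo.Merge(&to, from, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
		panic(err)
	}

	fmt.Println(to.Root)            // new_root
	fmt.Println(to.OOMScore)        // 2
	fmt.Println(to.RequiredPlugins) // [old_plugin new_plugin1 new_plugin2]
}
```

Note that a later patch in this series ("Merge configs section by section") additionally replaces the plugins, stream_processors and proxy_plugins map sections wholesale after this mergo pass, so an imported section overrides rather than deep-merges the corresponding section of the main file.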
docs/man/containerd-config.toml.5.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/man/containerd-config.toml.5.md b/docs/man/containerd-config.toml.5.md index 21a603cea9df..6b5c2cc77586 100644 --- a/docs/man/containerd-config.toml.5.md +++ b/docs/man/containerd-config.toml.5.md @@ -32,6 +32,14 @@ settings. **oom_score** : The out of memory (OOM) score applied to the containerd daemon process (Default: 0) +**imports** +: Imports is a list of additional configuration files to include. +This allows to split the main configuration file and keep some sections +separately (for example vendors may keep a custom runtime configuration in a +separate file without modifying the main `config.toml`). +Imported files will overwrite simple fields like `int` or +`string` (if not empty) and will append `array` and `map` fields. + **[grpc]** : Section for gRPC socket listener settings. Contains three properties: - **address** (Default: "/run/containerd/containerd.sock") @@ -82,6 +90,7 @@ The following is a complete **config.toml** default configuration example: root = "/var/lib/containerd" state = "/run/containerd" oom_score = 0 +imports = ["/etc/containerd/runtime_*.toml", "./debug.toml"] [grpc] address = "/run/containerd/containerd.sock" From 9e183f5e52a3c636bd0689ac8dea9dd59a869b8b Mon Sep 17 00:00:00 2001 From: Yu Yi Date: Mon, 25 Mar 2019 11:23:26 -0400 Subject: [PATCH 10/77] add cli option to download all manifests - Add `all-manifests` option to both `ctr content fetch` and `ctr images pull`. By default it is false. - This option ties to `AppendDistributionSourceLabel` in client. Signed-off-by: Yu Yi --- cmd/ctr/commands/content/fetch.go | 16 ++++++++++++++-- cmd/ctr/commands/images/pull.go | 8 ++++++++ pull.go | 11 ++++++++--- 3 files changed, 30 insertions(+), 5 deletions(-) diff --git a/cmd/ctr/commands/content/fetch.go b/cmd/ctr/commands/content/fetch.go index 99d2206b9e52..0f9f367997ee 100644 --- a/cmd/ctr/commands/content/fetch.go +++ b/cmd/ctr/commands/content/fetch.go @@ -34,7 +34,7 @@ import ( "github.com/containerd/containerd/pkg/progress" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes" - digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/urfave/cli" ) @@ -66,6 +66,10 @@ Most of this is experimental and there are few leaps to make this work.`, Name: "all-platforms", Usage: "pull content from all platforms", }, + cli.BoolFlag{ + Name: "all-manifests", + Usage: "Pull manifests from all platforms and layers for a specific platform", + }, ), Action: func(clicontext *cli.Context) error { var ( @@ -95,6 +99,8 @@ type FetchConfig struct { Labels []string // Platforms to fetch Platforms []string + // Whether or not download all manifests + IsAllManifests bool } // NewFetchConfig returns the default FetchConfig from cli flags @@ -117,6 +123,9 @@ func NewFetchConfig(ctx context.Context, clicontext *cli.Context) (*FetchConfig, } config.Platforms = p } + if clicontext.Bool("all-manifests") { + config.IsAllManifests = clicontext.Bool("all-manifests") + } return config, nil } @@ -149,7 +158,10 @@ func Fetch(ctx context.Context, client *containerd.Client, ref string, config *F containerd.WithResolver(config.Resolver), containerd.WithImageHandler(h), containerd.WithSchema1Conversion, - containerd.WithAppendDistributionSourceLabel(), + } + + if config.IsAllManifests { + opts = append(opts, containerd.WithAppendDistributionSourceLabel()) } for _, 
platform := range config.Platforms { diff --git a/cmd/ctr/commands/images/pull.go b/cmd/ctr/commands/images/pull.go index 3216976be4ae..d7df2851f21b 100644 --- a/cmd/ctr/commands/images/pull.go +++ b/cmd/ctr/commands/images/pull.go @@ -53,6 +53,10 @@ command. As part of this process, we do the following: Name: "all-platforms", Usage: "pull content from all platforms", }, + cli.BoolFlag{ + Name: "all-manifests", + Usage: "Pull manifests from all platforms and layers for a specific platform", + }, ), Action: func(context *cli.Context) error { var ( @@ -78,6 +82,10 @@ command. As part of this process, we do the following: if err != nil { return err } + if context.Bool("all-manifests") { + config.IsAllManifests = context.Bool("all-manifests") + } + img, err := content.Fetch(ctx, client, ref, config) if err != nil { return err diff --git a/pull.go b/pull.go index ef0d147baa1e..cf62399b1dd6 100644 --- a/pull.go +++ b/pull.go @@ -140,9 +140,14 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim childrenHandler := images.ChildrenHandler(store) // Set any children labels for that content childrenHandler = images.SetChildrenLabels(store, childrenHandler) - // Filter manifests by platforms but allow to handle manifest - // and configuration for not-target platforms - childrenHandler = remotes.FilterManifestByPlatformHandler(childrenHandler, rCtx.PlatformMatcher) + if rCtx.AppendDistributionSourceLabel { + // Filter manifests by platforms but allow to handle manifest + // and configuration for not-target platforms + childrenHandler = remotes.FilterManifestByPlatformHandler(childrenHandler, rCtx.PlatformMatcher) + } else { + // Filter children by platforms if specified. + childrenHandler = images.FilterPlatforms(childrenHandler, rCtx.PlatformMatcher) + } // Sort and limit manifests if a finite number is needed if limit > 0 { childrenHandler = images.LimitManifests(childrenHandler, rCtx.PlatformMatcher, limit) From aae2d0d754a75ad523cb27e8f386b2cd2cc7b1ed Mon Sep 17 00:00:00 2001 From: Yu Yi Date: Mon, 25 Mar 2019 16:01:23 -0400 Subject: [PATCH 11/77] delete unnecessary checks and fix a test Signed-off-by: Yu Yi --- client_test.go | 5 ++--- cmd/ctr/commands/content/fetch.go | 6 +++--- cmd/ctr/commands/images/pull.go | 3 --- 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/client_test.go b/client_test.go index 97776baaa9b5..182e5ca75921 100644 --- a/client_test.go +++ b/client_test.go @@ -287,9 +287,6 @@ func TestImagePullSomePlatforms(t *testing.T) { count := 0 for _, manifest := range manifests { children, err := images.Children(ctx, cs, manifest) - if err != nil { - t.Fatal(err) - } found := false for _, matcher := range m { @@ -315,6 +312,8 @@ func TestImagePullSomePlatforms(t *testing.T) { } ra.Close() } + } else if err == nil { + t.Fatal("manifest should not have pulled children content") } } diff --git a/cmd/ctr/commands/content/fetch.go b/cmd/ctr/commands/content/fetch.go index 0f9f367997ee..ea4c840ab08b 100644 --- a/cmd/ctr/commands/content/fetch.go +++ b/cmd/ctr/commands/content/fetch.go @@ -123,9 +123,9 @@ func NewFetchConfig(ctx context.Context, clicontext *cli.Context) (*FetchConfig, } config.Platforms = p } - if clicontext.Bool("all-manifests") { - config.IsAllManifests = clicontext.Bool("all-manifests") - } + + config.IsAllManifests = clicontext.Bool("all-manifests") + return config, nil } diff --git a/cmd/ctr/commands/images/pull.go b/cmd/ctr/commands/images/pull.go index d7df2851f21b..566b62f38971 100644 --- a/cmd/ctr/commands/images/pull.go 
+++ b/cmd/ctr/commands/images/pull.go @@ -82,9 +82,6 @@ command. As part of this process, we do the following: if err != nil { return err } - if context.Bool("all-manifests") { - config.IsAllManifests = context.Bool("all-manifests") - } img, err := content.Fetch(ctx, client, ref, config) if err != nil { From 8ebffecbc32e555945feca249e3464f8198a7968 Mon Sep 17 00:00:00 2001 From: Maksym Pavlenko Date: Fri, 23 Aug 2019 15:31:37 -0700 Subject: [PATCH 12/77] Use map for stream processors Signed-off-by: Maksym Pavlenko --- docs/stream_processors.md | 8 ++++---- services/server/config/config.go | 4 +--- services/server/config/config_test.go | 11 +++++++++++ services/server/server.go | 4 ++-- 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/docs/stream_processors.md b/docs/stream_processors.md index 67af20d58476..ba227a5991ae 100644 --- a/docs/stream_processors.md +++ b/docs/stream_processors.md @@ -21,20 +21,20 @@ pipe's path set as the value of the environment variable `STREAM_PROCESSOR_PIPE` ## Configuration To configure stream processors for containerd, entries in the config file need to be made. -The `stream_processors` field is an array so that users can chain together multiple processors +The `stream_processors` field is a map so that users can chain together multiple processors to mutate content streams. Processor Fields: -* `id` - ID of the processor, used for passing a specific payload to the processor. +* Key - ID of the processor, used for passing a specific payload to the processor. * `accepts` - Accepted media-types for the processor that it can handle. * `returns` - The media-type that the processor returns. * `path` - Path to the processor binary. * `args` - Arguments passed to the processor binary. ```toml -[[stream_processors]] - id = "io.containerd.processor.v1.pigz" +[stream_processors] + [stream_processors."io.containerd.processor.v1.pigz"] accepts = ["application/vnd.docker.image.rootfs.diff.tar.gzip"] returns = "application/vnd.oci.image.layer.v1.tar" path = "unpigz" diff --git a/services/server/config/config.go b/services/server/config/config.go index e3aacbae8b76..bc9520fea6e8 100644 --- a/services/server/config/config.go +++ b/services/server/config/config.go @@ -63,13 +63,11 @@ type Config struct { // Imports are additional file path list to config files that can overwrite main config file fields Imports []string `toml:"imports"` - StreamProcessors []StreamProcessor `toml:"stream_processors"` + StreamProcessors map[string]StreamProcessor `toml:"stream_processors"` } // StreamProcessor provides configuration for diff content processors type StreamProcessor struct { - // ID of the processor, also used to fetch the specific payload - ID string `toml:"id"` // Accepts specific media-types Accepts []string `toml:"accepts"` // Returns the media-type diff --git a/services/server/config/config_test.go b/services/server/config/config_test.go index 2f08488942f4..90cba46f26ef 100644 --- a/services/server/config/config_test.go +++ b/services/server/config/config_test.go @@ -86,6 +86,11 @@ func TestLoadSingleConfig(t *testing.T) { data := ` version = 2 root = "/var/lib/containerd" + +[stream_processors] + [stream_processors."io.containerd.processor.v1.pigz"] + accepts = ["application/vnd.docker.image.rootfs.diff.tar.gzip"] + path = "unpigz" ` tempDir, err := ioutil.TempDir("", "containerd_") assert.NilError(t, err) @@ -100,6 +105,12 @@ root = "/var/lib/containerd" assert.NilError(t, err) assert.Equal(t, 2, out.Version) assert.Equal(t, "/var/lib/containerd", out.Root) + 
assert.DeepEqual(t, map[string]StreamProcessor{ + "io.containerd.processor.v1.pigz": { + Accepts: []string{"application/vnd.docker.image.rootfs.diff.tar.gzip"}, + Path: "unpigz", + }, + }, out.StreamProcessors) } func TestLoadConfigWithImports(t *testing.T) { diff --git a/services/server/server.go b/services/server/server.go index 3f2bad22b194..92cd75e7d3f6 100644 --- a/services/server/server.go +++ b/services/server/server.go @@ -89,8 +89,8 @@ func New(ctx context.Context, config *srvconfig.Config) (*Server, error) { if err != nil { return nil, err } - for _, p := range config.StreamProcessors { - diff.RegisterProcessor(diff.BinaryHandler(p.ID, p.Returns, p.Accepts, p.Path, p.Args)) + for id, p := range config.StreamProcessors { + diff.RegisterProcessor(diff.BinaryHandler(id, p.Returns, p.Accepts, p.Path, p.Args)) } serverOpts := []grpc.ServerOption{ From 24b9e2c1a0a72a7ad302cdce7da3abbc4e6295cb Mon Sep 17 00:00:00 2001 From: Maksym Pavlenko Date: Fri, 23 Aug 2019 15:49:02 -0700 Subject: [PATCH 13/77] Merge configs section by section Signed-off-by: Maksym Pavlenko --- services/server/config/config.go | 21 ++++++++++++++++++++- services/server/config/config_test.go | 25 ++++++++++++++----------- 2 files changed, 34 insertions(+), 12 deletions(-) diff --git a/services/server/config/config.go b/services/server/config/config.go index bc9520fea6e8..98fef816e916 100644 --- a/services/server/config/config.go +++ b/services/server/config/config.go @@ -297,8 +297,27 @@ func resolveImports(parent string, imports []string) ([]string, error) { // 0 1 1 // []{"1"} []{"2"} []{"1","2"} // []{"1"} []{} []{"1"} +// Maps merged by keys, but values are replaced entirely. func mergeConfig(to, from *Config) error { - return mergo.Merge(to, from, mergo.WithOverride, mergo.WithAppendSlice) + err := mergo.Merge(to, from, mergo.WithOverride, mergo.WithAppendSlice) + if err != nil { + return err + } + + // Replace entire sections instead of merging map's values. 
+ for k, v := range from.Plugins { + to.Plugins[k] = v + } + + for k, v := range from.StreamProcessors { + to.StreamProcessors[k] = v + } + + for k, v := range from.ProxyPlugins { + to.ProxyPlugins[k] = v + } + + return nil } // V1DisabledFilter matches based on ID diff --git a/services/server/config/config_test.go b/services/server/config/config_test.go index 90cba46f26ef..8660105d46af 100644 --- a/services/server/config/config_test.go +++ b/services/server/config/config_test.go @@ -29,20 +29,22 @@ import ( func TestMergeConfigs(t *testing.T) { a := &Config{ - Version: 2, - Root: "old_root", - RequiredPlugins: []string{"old_plugin"}, - DisabledPlugins: []string{"old_plugin"}, - State: "old_state", - OOMScore: 1, - Timeouts: map[string]string{"a": "1"}, + Version: 2, + Root: "old_root", + RequiredPlugins: []string{"old_plugin"}, + DisabledPlugins: []string{"old_plugin"}, + State: "old_state", + OOMScore: 1, + Timeouts: map[string]string{"a": "1"}, + StreamProcessors: map[string]StreamProcessor{"1": {Path: "2", Returns: "4"}, "2": {Path: "5"}}, } b := &Config{ - Root: "new_root", - RequiredPlugins: []string{"new_plugin1", "new_plugin2"}, - OOMScore: 2, - Timeouts: map[string]string{"b": "2"}, + Root: "new_root", + RequiredPlugins: []string{"new_plugin1", "new_plugin2"}, + OOMScore: 2, + Timeouts: map[string]string{"b": "2"}, + StreamProcessors: map[string]StreamProcessor{"1": {Path: "3"}}, } err := mergeConfig(a, b) @@ -55,6 +57,7 @@ func TestMergeConfigs(t *testing.T) { assert.DeepEqual(t, a.RequiredPlugins, []string{"old_plugin", "new_plugin1", "new_plugin2"}) assert.DeepEqual(t, a.DisabledPlugins, []string{"old_plugin"}) assert.DeepEqual(t, a.Timeouts, map[string]string{"a": "1", "b": "2"}) + assert.DeepEqual(t, a.StreamProcessors, map[string]StreamProcessor{"1": {Path: "3"}, "2": {Path: "5"}}) } func TestResolveImports(t *testing.T) { From 5cab0d3f3f0afcc06bb63dcf9eb9540afb22d8de Mon Sep 17 00:00:00 2001 From: fesu Date: Mon, 26 Aug 2019 15:01:34 +0800 Subject: [PATCH 14/77] update the max if cur will be greater than max for setting the limit Signed-off-by: Su Fei --- cmd/containerd-stress/rlimit_unix.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/containerd-stress/rlimit_unix.go b/cmd/containerd-stress/rlimit_unix.go index 807a9a92f5a9..492f60432319 100644 --- a/cmd/containerd-stress/rlimit_unix.go +++ b/cmd/containerd-stress/rlimit_unix.go @@ -31,6 +31,9 @@ func setRlimit() error { } if limit.Cur < rlimit { limit.Cur = rlimit + if limit.Max < limit.Cur { + limit.Max = limit.Cur + } if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil { return err } From cbb3a3790efb138a11e5cb7c6a80ec2599354697 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Mon, 26 Aug 2019 13:36:33 +0200 Subject: [PATCH 15/77] bump hashicorp/errwrap v1.0.0 full diff: https://github.com/hashicorp/errwrap/compare/7554cd9344cec97297fa6649b055a8c98c2a1e55...v1.0.0 Signed-off-by: Sebastiaan van Stijn --- vendor.conf | 2 +- vendor/github.com/hashicorp/errwrap/README.md | 2 +- vendor/github.com/hashicorp/errwrap/go.mod | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 vendor/github.com/hashicorp/errwrap/go.mod diff --git a/vendor.conf b/vendor.conf index 4bcc15bc9c31..ba495d935378 100644 --- a/vendor.conf +++ b/vendor.conf @@ -42,7 +42,7 @@ github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2 gotest.tools v2.3.0 github.com/google/go-cmp v0.2.0 go.etcd.io/bbolt v1.3.3 -github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 
+github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f github.com/hashicorp/golang-lru v0.5.1 go.opencensus.io v0.22.0 diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md index 1c95f59782bb..444df08f8e77 100644 --- a/vendor/github.com/hashicorp/errwrap/README.md +++ b/vendor/github.com/hashicorp/errwrap/README.md @@ -48,7 +48,7 @@ func main() { // We can use the Contains helpers to check if an error contains // another error. It is safe to do this with a nil error, or with // an error that doesn't even use the errwrap package. - if errwrap.Contains(err, ErrNotExist) { + if errwrap.Contains(err, "does not exist") { // Do something } if errwrap.ContainsType(err, new(os.PathError)) { diff --git a/vendor/github.com/hashicorp/errwrap/go.mod b/vendor/github.com/hashicorp/errwrap/go.mod new file mode 100644 index 000000000000..c9b84022cf7a --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/errwrap From ed1df65c250321d94b3e07d556f3536cef5b97be Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Mon, 26 Aug 2019 13:37:53 +0200 Subject: [PATCH 16/77] bump hashicorp/go-multierror v1.0.0 full diff: https://github.com/hashicorp/go-multierror/compare/ed905158d87462226a13fe39ddf685ea65f1c11f...v1.0.0 Signed-off-by: Sebastiaan van Stijn --- vendor.conf | 2 +- .../github.com/hashicorp/go-multierror/format.go | 6 +++--- vendor/github.com/hashicorp/go-multierror/go.mod | 3 +++ .../hashicorp/go-multierror/multierror.go | 4 ++-- .../github.com/hashicorp/go-multierror/sort.go | 16 ++++++++++++++++ 5 files changed, 25 insertions(+), 6 deletions(-) create mode 100644 vendor/github.com/hashicorp/go-multierror/go.mod create mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go diff --git a/vendor.conf b/vendor.conf index ba495d935378..4436c7397001 100644 --- a/vendor.conf +++ b/vendor.conf @@ -43,7 +43,7 @@ gotest.tools v2.3.0 github.com/google/go-cmp v0.2.0 go.etcd.io/bbolt v1.3.3 github.com/hashicorp/errwrap v1.0.0 -github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f +github.com/hashicorp/go-multierror v1.0.0 github.com/hashicorp/golang-lru v0.5.1 go.opencensus.io v0.22.0 diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go index 6c7a3cc91de7..47f13c49a673 100644 --- a/vendor/github.com/hashicorp/go-multierror/format.go +++ b/vendor/github.com/hashicorp/go-multierror/format.go @@ -13,7 +13,7 @@ type ErrorFormatFunc func([]error) string // that occurred along with a bullet point list of the errors. 
func ListFormatFunc(es []error) string { if len(es) == 1 { - return fmt.Sprintf("1 error occurred:\n\n* %s", es[0]) + return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0]) } points := make([]string, len(es)) @@ -22,6 +22,6 @@ func ListFormatFunc(es []error) string { } return fmt.Sprintf( - "%d errors occurred:\n\n%s", - len(es), strings.Join(points, "\n")) + "%d errors occurred:\n\t%s\n\n", + len(es), strings.Join(points, "\n\t")) } diff --git a/vendor/github.com/hashicorp/go-multierror/go.mod b/vendor/github.com/hashicorp/go-multierror/go.mod new file mode 100644 index 000000000000..2534331d5f9c --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/go.mod @@ -0,0 +1,3 @@ +module github.com/hashicorp/go-multierror + +require github.com/hashicorp/errwrap v1.0.0 diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go index 2ea082732903..89b1422d1d17 100644 --- a/vendor/github.com/hashicorp/go-multierror/multierror.go +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -40,11 +40,11 @@ func (e *Error) GoString() string { } // WrappedErrors returns the list of errors that this Error is wrapping. -// It is an implementatin of the errwrap.Wrapper interface so that +// It is an implementation of the errwrap.Wrapper interface so that // multierror.Error can be used with that library. // // This method is not safe to be called concurrently and is no different -// than accessing the Errors field directly. It is implementd only to +// than accessing the Errors field directly. It is implemented only to // satisfy the errwrap.Wrapper interface. func (e *Error) WrappedErrors() []error { return e.Errors diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go new file mode 100644 index 000000000000..fecb14e81c54 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/sort.go @@ -0,0 +1,16 @@ +package multierror + +// Len implements sort.Interface function for length +func (err Error) Len() int { + return len(err.Errors) +} + +// Swap implements sort.Interface function for swapping elements +func (err Error) Swap(i, j int) { + err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i] +} + +// Less implements sort.Interface function for determining order +func (err Error) Less(i, j int) bool { + return err.Errors[i].Error() < err.Errors[j].Error() +} From 55f737bd322fcdfbe7e1eba335ad75d26c364e68 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Mon, 26 Aug 2019 13:40:08 +0200 Subject: [PATCH 17/77] bump hashicorp/golang-lru v0.5.3 full diff: https://github.com/hashicorp/golang-lru/compare/v0.5.1...v0.5.3 Signed-off-by: Sebastiaan van Stijn --- vendor.conf | 2 +- vendor/github.com/hashicorp/golang-lru/go.mod | 2 ++ .../hashicorp/golang-lru/simplelru/lru.go | 16 ++++++++++++++++ .../golang-lru/simplelru/lru_interface.go | 7 +++++-- 4 files changed, 24 insertions(+), 3 deletions(-) diff --git a/vendor.conf b/vendor.conf index 4436c7397001..3c3dfc18be40 100644 --- a/vendor.conf +++ b/vendor.conf @@ -44,7 +44,7 @@ github.com/google/go-cmp v0.2.0 go.etcd.io/bbolt v1.3.3 github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/go-multierror v1.0.0 -github.com/hashicorp/golang-lru v0.5.1 +github.com/hashicorp/golang-lru v0.5.3 go.opencensus.io v0.22.0 # cri dependencies diff --git a/vendor/github.com/hashicorp/golang-lru/go.mod b/vendor/github.com/hashicorp/golang-lru/go.mod index 824cb97e8346..8ad8826b368d 100644 --- 
a/vendor/github.com/hashicorp/golang-lru/go.mod +++ b/vendor/github.com/hashicorp/golang-lru/go.mod @@ -1 +1,3 @@ module github.com/hashicorp/golang-lru + +go 1.12 diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go index 5673773b22be..a86c8539e066 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -73,6 +73,9 @@ func (c *LRU) Add(key, value interface{}) (evicted bool) { func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) + if ent.Value.(*entry) == nil { + return nil, false + } return ent.Value.(*entry).value, true } return @@ -142,6 +145,19 @@ func (c *LRU) Len() int { return c.evictList.Len() } +// Resize changes the cache size. +func (c *LRU) Resize(size int) (evicted int) { + diff := c.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.removeOldest() + } + c.size = size + return diff +} + // removeOldest removes the oldest item from the cache. func (c *LRU) removeOldest() { ent := c.evictList.Back() diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go index 74c7077440c9..92d70934d632 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -10,7 +10,7 @@ type LRUCache interface { // updates the "recently used"-ness of the key. #value, isFound Get(key interface{}) (value interface{}, ok bool) - // Check if a key exsists in cache without updating the recent-ness. + // Checks if a key exists in cache without updating the recent-ness. Contains(key interface{}) (ok bool) // Returns key's value without updating the "recently used"-ness of the key. @@ -31,6 +31,9 @@ type LRUCache interface { // Returns the number of items in the cache. Len() int - // Clear all cache entries + // Clears all cache entries. Purge() + + // Resizes cache, returning number evicted + Resize(int) int } From a40c3830df92f63995ae0622b4d4116dfc6cf0de Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 23 Aug 2019 15:48:05 -0700 Subject: [PATCH 18/77] Add option to pull all metadata Add flags to pull and fetch to grab all metadata. Add fetch option to pull only metadata. Signed-off-by: Derek McGowan --- client.go | 5 ++--- client_opts.go | 7 +++---- cmd/ctr/commands/content/fetch.go | 35 +++++++++++++++++++++++-------- cmd/ctr/commands/images/pull.go | 6 +++--- image_test.go | 6 ++---- pull.go | 18 +++++++--------- 6 files changed, 43 insertions(+), 34 deletions(-) diff --git a/client.go b/client.go index aa626abcb8a2..5299179c6397 100644 --- a/client.go +++ b/client.go @@ -333,9 +333,8 @@ type RemoteContext struct { // MaxConcurrentDownloads is the max concurrent content downloads for each pull. MaxConcurrentDownloads int - // AppendDistributionSourceLabel allows fetcher to add distribute source - // label for each blob content, which doesn't work for legacy schema1. 
- AppendDistributionSourceLabel bool + // AllMetadata downloads all manifests and known-configuration files + AllMetadata bool } func defaultRemoteContext() *RemoteContext { diff --git a/client_opts.go b/client_opts.go index ed2ff05d5a46..86735953968d 100644 --- a/client_opts.go +++ b/client_opts.go @@ -195,11 +195,10 @@ func WithMaxConcurrentDownloads(max int) RemoteOpt { } } -// WithAppendDistributionSourceLabel allows fetcher to add distribute source -// label for each blob content, which doesn't work for legacy schema1. -func WithAppendDistributionSourceLabel() RemoteOpt { +// WithAllMetadata downloads all manifests and known-configuration files +func WithAllMetadata() RemoteOpt { return func(_ *Client, c *RemoteContext) error { - c.AppendDistributionSourceLabel = true + c.AllMetadata = true return nil } } diff --git a/cmd/ctr/commands/content/fetch.go b/cmd/ctr/commands/content/fetch.go index ea4c840ab08b..ea94275c7fe6 100644 --- a/cmd/ctr/commands/content/fetch.go +++ b/cmd/ctr/commands/content/fetch.go @@ -67,8 +67,12 @@ Most of this is experimental and there are few leaps to make this work.`, Usage: "pull content from all platforms", }, cli.BoolFlag{ - Name: "all-manifests", - Usage: "Pull manifests from all platforms and layers for a specific platform", + Name: "all-metadata", + Usage: "Pull metadata for all platforms", + }, + cli.BoolFlag{ + Name: "metadata-only", + Usage: "Pull all metadata including manifests and configs", }, ), Action: func(clicontext *cli.Context) error { @@ -84,6 +88,7 @@ Most of this is experimental and there are few leaps to make this work.`, if err != nil { return err } + _, err = Fetch(ctx, client, ref, config) return err }, @@ -97,10 +102,12 @@ type FetchConfig struct { ProgressOutput io.Writer // Labels to set on the content Labels []string + // PlatformMatcher matches platforms, supersedes Platforms + PlatformMatcher platforms.MatchComparer // Platforms to fetch Platforms []string - // Whether or not download all manifests - IsAllManifests bool + // Whether or not download all metadata + AllMetadata bool } // NewFetchConfig returns the default FetchConfig from cli flags @@ -124,7 +131,13 @@ func NewFetchConfig(ctx context.Context, clicontext *cli.Context) (*FetchConfig, config.Platforms = p } - config.IsAllManifests = clicontext.Bool("all-manifests") + if clicontext.Bool("metadata-only") { + config.AllMetadata = true + // Any with an empty set is None + config.PlatformMatcher = platforms.Any() + } else if clicontext.Bool("all-metadata") { + config.AllMetadata = true + } return config, nil } @@ -160,12 +173,16 @@ func Fetch(ctx context.Context, client *containerd.Client, ref string, config *F containerd.WithSchema1Conversion, } - if config.IsAllManifests { - opts = append(opts, containerd.WithAppendDistributionSourceLabel()) + if config.AllMetadata { + opts = append(opts, containerd.WithAllMetadata()) } - for _, platform := range config.Platforms { - opts = append(opts, containerd.WithPlatform(platform)) + if config.PlatformMatcher != nil { + opts = append(opts, containerd.WithPlatformMatcher(config.PlatformMatcher)) + } else { + for _, platform := range config.Platforms { + opts = append(opts, containerd.WithPlatform(platform)) + } } img, err := client.Fetch(pctx, ref, opts...) diff --git a/cmd/ctr/commands/images/pull.go b/cmd/ctr/commands/images/pull.go index 566b62f38971..6ca88df9f9fc 100644 --- a/cmd/ctr/commands/images/pull.go +++ b/cmd/ctr/commands/images/pull.go @@ -51,11 +51,11 @@ command. 
As part of this process, we do the following: }, cli.BoolFlag{ Name: "all-platforms", - Usage: "pull content from all platforms", + Usage: "pull content and metadata from all platforms", }, cli.BoolFlag{ - Name: "all-manifests", - Usage: "Pull manifests from all platforms and layers for a specific platform", + Name: "all-metadata", + Usage: "Pull metadata for all platforms", }, ), Action: func(context *cli.Context) error { diff --git a/image_test.go b/image_test.go index 33203764fce7..37b072a36c38 100644 --- a/image_test.go +++ b/image_test.go @@ -99,9 +99,7 @@ func TestImagePullWithDistSourceLabel(t *testing.T) { pMatcher := platforms.Default() // pull content without unpack and add distribution source label - image, err := client.Pull(ctx, imageName, - WithPlatformMatcher(pMatcher), - WithAppendDistributionSourceLabel()) + image, err := client.Pull(ctx, imageName, WithPlatformMatcher(pMatcher)) if err != nil { t.Fatal(err) } @@ -183,7 +181,7 @@ func TestImageUsage(t *testing.T) { imageName = imageName + "@" + image.Target().Digest.String() // Fetch single platforms, but all manifests pulled - if _, err := client.Fetch(ctx, imageName, WithPlatformMatcher(testPlatform)); err != nil { + if _, err := client.Fetch(ctx, imageName, WithPlatformMatcher(testPlatform), WithAllMetadata()); err != nil { t.Fatal(err) } diff --git a/pull.go b/pull.go index cf62399b1dd6..fe9f6abda5b2 100644 --- a/pull.go +++ b/pull.go @@ -140,7 +140,7 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim childrenHandler := images.ChildrenHandler(store) // Set any children labels for that content childrenHandler = images.SetChildrenLabels(store, childrenHandler) - if rCtx.AppendDistributionSourceLabel { + if rCtx.AllMetadata { // Filter manifests by platforms but allow to handle manifest // and configuration for not-target platforms childrenHandler = remotes.FilterManifestByPlatformHandler(childrenHandler, rCtx.PlatformMatcher) @@ -164,22 +164,18 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim }, ) + appendDistSrcLabelHandler, err := docker.AppendDistributionSourceLabel(store, ref) + if err != nil { + return images.Image{}, err + } + handlers := append(rCtx.BaseHandlers, remotes.FetchHandler(store, fetcher), convertibleHandler, childrenHandler, + appendDistSrcLabelHandler, ) - // append distribution source label to blob data - if rCtx.AppendDistributionSourceLabel { - appendDistSrcLabelHandler, err := docker.AppendDistributionSourceLabel(store, ref) - if err != nil { - return images.Image{}, err - } - - handlers = append(handlers, appendDistSrcLabelHandler) - } - handler = images.Handlers(handlers...) 
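As a usage note for the new option: a hedged client sketch that fetches an image while keeping manifests and configs for every platform (socket path, namespace and image reference are placeholders; Fetch downloads content without unpacking):

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")

	// WithAllMetadata keeps all manifests and known configuration blobs,
	// mirroring `ctr content fetch --all-metadata`.
	img, err := client.Fetch(ctx, "docker.io/library/alpine:latest",
		containerd.WithAllMetadata())
	if err != nil {
		log.Fatal(err)
	}
	log.Println("fetched", img.Name, "target", img.Target.Digest)
}
```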
converterFunc = func(ctx context.Context, desc ocispec.Descriptor) (ocispec.Descriptor, error) { From 6f8fb9ee5de686af0f001fffa74c7c1343601c33 Mon Sep 17 00:00:00 2001 From: Evan Hazlett Date: Mon, 26 Aug 2019 14:29:17 -0400 Subject: [PATCH 19/77] add WithImageName container opt Signed-off-by: Evan Hazlett --- container_opts.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/container_opts.go b/container_opts.go index e36b47e2e809..23e77492ee36 100644 --- a/container_opts.go +++ b/container_opts.go @@ -78,6 +78,14 @@ func WithImage(i Image) NewContainerOpts { } } +// WithImageName allows setting the image name as the base for the container +func WithImageName(n string) NewContainerOpts { + return func(ctx context.Context, _ *Client, c *containers.Container) error { + c.Image = n + return nil + } +} + // WithContainerLabels adds the provided labels to the container func WithContainerLabels(labels map[string]string) NewContainerOpts { return func(_ context.Context, _ *Client, c *containers.Container) error { From 56bcc776791b1e1cbb6eb3379613c830a0a1a2ee Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Mon, 26 Aug 2019 11:31:33 -0700 Subject: [PATCH 20/77] Handle layers with the same digest in unpacker. Signed-off-by: Lantao Liu --- unpacker.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/unpacker.go b/unpacker.go index 89c395cbbc31..1dc8c148128a 100644 --- a/unpacker.go +++ b/unpacker.go @@ -113,6 +113,12 @@ func (u *unpacker) unpack(ctx context.Context, config ocispec.Descriptor, layers if states[i].layer.Blob.Digest != layer.Digest { continue } + // Different layers may have the same digest. When that + // happens, we should continue marking the next layer + // as downloaded. + if states[i].downloaded { + continue + } states[i].downloaded = true break } From eb7a6bf02c00e398cfe5b8d7a069edd80e7fc993 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Mon, 26 Aug 2019 16:48:54 +0200 Subject: [PATCH 21/77] bump google.golang.org/grpc v1.23.0 full diff: https://github.com/grpc/grpc-go/compare/v1.20.1...v1.23.0 This update contains security fixes: - transport: block reading frames when too many transport control frames are queued (grpc/grpc-go#2970) - Addresses CVE-2019-9512 (Ping Flood), CVE-2019-9514 (Reset Flood), and CVE-2019-9515 (Settings Flood). 
Other changes can be found in the release notes: https://github.com/grpc/grpc-go/releases/tag/v1.23.0 Signed-off-by: Sebastiaan van Stijn --- vendor.conf | 2 +- vendor/google.golang.org/grpc/README.md | 76 +++- vendor/google.golang.org/grpc/balancer.go | 8 +- .../grpc/balancer/balancer.go | 42 ++- .../grpc/balancer/base/balancer.go | 20 +- .../grpc/balancer_conn_wrappers.go | 19 +- .../grpc/balancer_v1_wrapper.go | 13 +- vendor/google.golang.org/grpc/clientconn.go | 357 +++++++++++------- vendor/google.golang.org/grpc/codes/codes.go | 3 +- .../grpc/credentials/credentials.go | 6 +- vendor/google.golang.org/grpc/dialoptions.go | 84 +++-- vendor/google.golang.org/grpc/go.mod | 6 +- .../google.golang.org/grpc/health/client.go | 22 +- .../grpc/internal/channelz/funcs.go | 30 +- .../grpc/internal/envconfig/envconfig.go | 33 +- .../grpc/internal/internal.go | 19 +- .../grpc/internal/transport/controlbuf.go | 84 ++++- .../grpc/internal/transport/flowcontrol.go | 3 +- .../grpc/internal/transport/handler_server.go | 5 +- .../grpc/internal/transport/http2_client.go | 94 +++-- .../grpc/internal/transport/http2_server.go | 68 ++-- .../grpc/internal/transport/transport.go | 80 +++- .../google.golang.org/grpc/naming/naming.go | 3 +- .../google.golang.org/grpc/picker_wrapper.go | 8 + vendor/google.golang.org/grpc/pickfirst.go | 16 +- vendor/google.golang.org/grpc/preloader.go | 64 ++++ .../grpc/resolver/dns/dns_resolver.go | 19 + .../grpc/resolver/resolver.go | 26 +- .../grpc/resolver_conn_wrapper.go | 15 +- vendor/google.golang.org/grpc/rpc_util.go | 26 +- vendor/google.golang.org/grpc/server.go | 210 ++++++----- .../google.golang.org/grpc/service_config.go | 72 +++- .../grpc/serviceconfig/serviceconfig.go | 48 +++ .../google.golang.org/grpc/status/status.go | 22 +- vendor/google.golang.org/grpc/stream.go | 145 ++++--- vendor/google.golang.org/grpc/version.go | 2 +- 36 files changed, 1216 insertions(+), 534 deletions(-) create mode 100644 vendor/google.golang.org/grpc/preloader.go create mode 100644 vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go diff --git a/vendor.conf b/vendor.conf index 3c3dfc18be40..da4afee4e652 100644 --- a/vendor.conf +++ b/vendor.conf @@ -25,7 +25,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1 github.com/sirupsen/logrus v1.4.1 github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c golang.org/x/net f3200d17e092c607f615320ecaad13d87ad9a2b3 -google.golang.org/grpc 25c4f928eaa6d96443009bd842389fb4fa48664e # v1.20.1 +google.golang.org/grpc 6eaf6f47437a6b4e2153a190160ef39a92c7eceb # v1.23.0 github.com/pkg/errors v0.8.1 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 golang.org/x/sys 9eafafc0a87e0fd0aeeba439a4573537970c44c7 https://github.com/golang/sys diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index f5eec6717f31..afbc43db5105 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,42 +1,96 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) +[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) 
+[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) -The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide. +The Go implementation of [gRPC](https://grpc.io/): A high performance, open +source, general RPC framework that puts mobile and HTTP/2 first. For more +information see the [gRPC Quick Start: +Go](https://grpc.io/docs/quickstart/go.html) guide. Installation ------------ -To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: +To install this package, you need to install Go and setup your Go workspace on +your computer. The simplest way to install the library is to run: ``` $ go get -u google.golang.org/grpc ``` +With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in +your source code and `go [build|run|test]` will automatically download the +necessary dependencies ([Go modules +ref](https://github.com/golang/go/wiki/Modules)). + +If you are trying to access grpc-go from within China, please see the +[FAQ](#FAQ) below. + Prerequisites ------------- - gRPC-Go requires Go 1.9 or later. -Constraints ------------ -The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. - Documentation ------------- -See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). +- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API + descriptions. +- Documentation on specific topics can be found in the [Documentation + directory](Documentation/). +- Examples can be found in the [examples directory](examples/). Performance ----------- -See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). +Performance benchmark data for grpc-go and other languages is maintained in +[this +dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). Status ------ -General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). +General Availability [Google Cloud Platform Launch +Stages](https://cloud.google.com/terms/launch-stages). FAQ --- +#### I/O Timeout Errors + +The `golang.org` domain may be blocked from some countries. `go get` usually +produces an error like the following when this happens: + +``` +$ go get -u google.golang.org/grpc +package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) +``` + +To build Go code, there are several options: + +- Set up a VPN and access google.golang.org through that. 
+ +- Without Go module support: `git clone` the repo manually: + + ``` + git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc + ``` + + You will need to do the same for all of grpc's dependencies in `golang.org`, + e.g. `golang.org/x/net`. + +- With Go module support: it is possible to use the `replace` feature of `go + mod` to create aliases for golang.org packages. In your project's directory: + + ``` + go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest + go mod tidy + go mod vendor + go build -mod=vendor + ``` + + Again, this will need to be done for all transitive dependencies hosted on + golang.org as well. Please refer to [this + issue](https://github.com/golang/go/issues/28652) in the golang repo regarding + this concern. + #### Compiling error, undefined: grpc.SupportPackageIsVersion Please update proto package, gRPC package and rebuild the proto files: diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go index a78e702baee3..a8eb0f476091 100644 --- a/vendor/google.golang.org/grpc/balancer.go +++ b/vendor/google.golang.org/grpc/balancer.go @@ -43,7 +43,7 @@ type Address struct { // BalancerConfig specifies the configurations for Balancer. // -// Deprecated: please use package balancer. +// Deprecated: please use package balancer. May be removed in a future 1.x release. type BalancerConfig struct { // DialCreds is the transport credential the Balancer implementation can // use to dial to a remote load balancer server. The Balancer implementations @@ -57,7 +57,7 @@ type BalancerConfig struct { // BalancerGetOptions configures a Get call. // -// Deprecated: please use package balancer. +// Deprecated: please use package balancer. May be removed in a future 1.x release. type BalancerGetOptions struct { // BlockingWait specifies whether Get should block when there is no // connected address. @@ -66,7 +66,7 @@ type BalancerGetOptions struct { // Balancer chooses network addresses for RPCs. // -// Deprecated: please use package balancer. +// Deprecated: please use package balancer. May be removed in a future 1.x release. type Balancer interface { // Start does the initialization work to bootstrap a Balancer. For example, // this function may start the name resolution and watch the updates. It will @@ -120,7 +120,7 @@ type Balancer interface { // RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch // the name resolution updates and updates the addresses available correspondingly. // -// Deprecated: please use package balancer/roundrobin. +// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release. func RoundRobin(r naming.Resolver) Balancer { return &roundRobin{r: r} } diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index fafede238c13..c266f4ec102c 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -22,6 +22,7 @@ package balancer import ( "context" + "encoding/json" "errors" "net" "strings" @@ -31,6 +32,7 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" ) var ( @@ -39,7 +41,10 @@ var ( ) // Register registers the balancer builder to the balancer map. b.Name -// (lowercased) will be used as the name registered with this builder. 
+// (lowercased) will be used as the name registered with this builder. If the +// Builder implements ConfigParser, ParseConfig will be called when new service +// configs are received by the resolver, and the result will be provided to the +// Balancer in UpdateClientConnState. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. If multiple Balancers are @@ -138,6 +143,8 @@ type ClientConn interface { ResolveNow(resolver.ResolveNowOption) // Target returns the dial target for this ClientConn. + // + // Deprecated: Use the Target field in the BuildOptions instead. Target() string } @@ -155,6 +162,10 @@ type BuildOptions struct { Dialer func(context.Context, string) (net.Conn, error) // ChannelzParentID is the entity parent's channelz unique identification number. ChannelzParentID int64 + // Target contains the parsed address info of the dial target. It is the same resolver.Target as + // passed to the resolver. + // See the documentation for the resolver.Target type for details about what it contains. + Target resolver.Target } // Builder creates a balancer. @@ -166,6 +177,14 @@ type Builder interface { Name() string } +// ConfigParser parses load balancer configs. +type ConfigParser interface { + // ParseConfig parses the JSON load balancer config provided into an + // internal form or returns an error if the config is invalid. For future + // compatibility reasons, unknown fields in the config should be ignored. + ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) +} + // PickOptions contains addition information for the Pick operation. type PickOptions struct { // FullMethodName is the method name that NewClientStream() is called @@ -264,7 +283,7 @@ type Balancer interface { // non-nil error to gRPC. // // Deprecated: if V2Balancer is implemented by the Balancer, - // UpdateResolverState will be called instead. + // UpdateClientConnState will be called instead. HandleResolvedAddrs([]resolver.Address, error) // Close closes the balancer. The balancer is not required to call // ClientConn.RemoveSubConn for its existing SubConns. @@ -277,14 +296,23 @@ type SubConnState struct { // TODO: add last connection error } +// ClientConnState describes the state of a ClientConn relevant to the +// balancer. +type ClientConnState struct { + ResolverState resolver.State + // The parsed load balancing configuration returned by the builder's + // ParseConfig method, if implemented. + BalancerConfig serviceconfig.LoadBalancingConfig +} + // V2Balancer is defined for documentation purposes. If a Balancer also -// implements V2Balancer, its UpdateResolverState method will be called instead -// of HandleResolvedAddrs and its UpdateSubConnState will be called instead of -// HandleSubConnStateChange. +// implements V2Balancer, its UpdateClientConnState method will be called +// instead of HandleResolvedAddrs and its UpdateSubConnState will be called +// instead of HandleSubConnStateChange. type V2Balancer interface { - // UpdateResolverState is called by gRPC when the state of the resolver + // UpdateClientConnState is called by gRPC when the state of the ClientConn // changes. - UpdateResolverState(resolver.State) + UpdateClientConnState(ClientConnState) // UpdateSubConnState is called by gRPC when the state of a SubConn // changes. 
UpdateSubConnState(SubConn, SubConnState) diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index c5a51bd1d992..1af88f0a3f1b 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -70,13 +70,15 @@ func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) panic("not implemented") } -func (b *baseBalancer) UpdateResolverState(s resolver.State) { - // TODO: handle s.Err (log if not nil) once implemented. - // TODO: handle s.ServiceConfig? - grpclog.Infoln("base.baseBalancer: got new resolver state: ", s) +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) { + // TODO: handle s.ResolverState.Err (log if not nil) once implemented. + // TODO: handle s.ResolverState.ServiceConfig? + if grpclog.V(2) { + grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) + } // addrsSet is the set converted from addrs, it's used for quick lookup of an address. addrsSet := make(map[resolver.Address]struct{}) - for _, a := range s.Addresses { + for _, a := range s.ResolverState.Addresses { addrsSet[a] = struct{}{} if _, ok := b.subConns[a]; !ok { // a is a new address (not existing in b.subConns). @@ -127,10 +129,14 @@ func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectiv func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState - grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + if grpclog.V(2) { + grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + } oldS, ok := b.scStates[sc] if !ok { - grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + if grpclog.V(2) { + grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } return } b.scStates[sc] = s diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index bc965f0acaaa..8df4095ca951 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -88,7 +88,7 @@ type ccBalancerWrapper struct { cc *ClientConn balancer balancer.Balancer stateChangeQueue *scStateUpdateBuffer - resolverUpdateCh chan *resolver.State + ccUpdateCh chan *balancer.ClientConnState done chan struct{} mu sync.Mutex @@ -99,7 +99,7 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui ccb := &ccBalancerWrapper{ cc: cc, stateChangeQueue: newSCStateUpdateBuffer(), - resolverUpdateCh: make(chan *resolver.State, 1), + ccUpdateCh: make(chan *balancer.ClientConnState, 1), done: make(chan struct{}), subConns: make(map[*acBalancerWrapper]struct{}), } @@ -126,7 +126,7 @@ func (ccb *ccBalancerWrapper) watcher() { } else { ccb.balancer.HandleSubConnStateChange(t.sc, t.state) } - case s := <-ccb.resolverUpdateCh: + case s := <-ccb.ccUpdateCh: select { case <-ccb.done: ccb.balancer.Close() @@ -134,9 +134,9 @@ func (ccb *ccBalancerWrapper) watcher() { default: } if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ub.UpdateResolverState(*s) + ub.UpdateClientConnState(*s) } else { - ccb.balancer.HandleResolvedAddrs(s.Addresses, nil) + ccb.balancer.HandleResolvedAddrs(s.ResolverState.Addresses, nil) } case <-ccb.done: } @@ -151,9 +151,11 @@ func (ccb *ccBalancerWrapper) watcher() { for acbw := range scs { 
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } + ccb.UpdateBalancerState(connectivity.Connecting, nil) return default: } + ccb.cc.firstResolveEvent.Fire() } } @@ -178,9 +180,10 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co }) } -func (ccb *ccBalancerWrapper) updateResolverState(s resolver.State) { +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) { if ccb.cc.curBalancerName != grpclbName { // Filter any grpclb addresses since we don't have the grpclb balancer. + s := &ccs.ResolverState for i := 0; i < len(s.Addresses); { if s.Addresses[i].Type == resolver.GRPCLB { copy(s.Addresses[i:], s.Addresses[i+1:]) @@ -191,10 +194,10 @@ func (ccb *ccBalancerWrapper) updateResolverState(s resolver.State) { } } select { - case <-ccb.resolverUpdateCh: + case <-ccb.ccUpdateCh: default: } - ccb.resolverUpdateCh <- &s + ccb.ccUpdateCh <- ccs } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go index 29bda6353dd5..66e9a44ac4da 100644 --- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -20,7 +20,6 @@ package grpc import ( "context" - "strings" "sync" "google.golang.org/grpc/balancer" @@ -34,13 +33,7 @@ type balancerWrapperBuilder struct { } func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - targetAddr := cc.Target() - targetSplitted := strings.Split(targetAddr, ":///") - if len(targetSplitted) >= 2 { - targetAddr = targetSplitted[1] - } - - bwb.b.Start(targetAddr, BalancerConfig{ + bwb.b.Start(opts.Target.Endpoint, BalancerConfig{ DialCreds: opts.DialCreds, Dialer: opts.Dialer, }) @@ -49,7 +42,7 @@ func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.B balancer: bwb.b, pickfirst: pickfirst, cc: cc, - targetAddr: targetAddr, + targetAddr: opts.Target.Endpoint, startCh: make(chan struct{}), conns: make(map[resolver.Address]balancer.SubConn), connSt: make(map[balancer.SubConn]*scState), @@ -120,7 +113,7 @@ func (bw *balancerWrapper) lbWatcher() { } for addrs := range notifyCh { - grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs) + grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs) if bw.pickfirst { var ( oldA resolver.Address diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index bd2d2b317798..a7643df7d297 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -38,13 +38,13 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. 
+ "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" ) @@ -137,6 +137,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * opt.apply(&cc.dopts) } + chainUnaryClientInterceptors(cc) + chainStreamClientInterceptors(cc) + defer func() { if err != nil { cc.Close() @@ -290,6 +293,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, } // Build the resolver. @@ -327,6 +331,68 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * return cc, nil } +// chainUnaryClientInterceptors chains all unary client interceptors into one. +func chainUnaryClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainUnaryInts + // Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + if cc.dopts.unaryInt != nil { + interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...) + } + var chainedInt UnaryClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) + } + } + cc.dopts.unaryInt = chainedInt +} + +// getChainUnaryInvoker recursively generate the chained unary invoker. +func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker { + if curr == len(interceptors)-1 { + return finalInvoker + } + return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) + } +} + +// chainStreamClientInterceptors chains all stream client interceptors into one. +func chainStreamClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainStreamInts + // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + if cc.dopts.streamInt != nil { + interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...) + } + var chainedInt StreamClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { + return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...) + } + } + cc.dopts.streamInt = chainedInt +} + +// getChainStreamer recursively generate the chained client stream constructor. +func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer { + if curr == len(interceptors)-1 { + return finalStreamer + } + return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...) 
+ } +} + // connectivityStateManager keeps the connectivity.State of ClientConn. // This struct will eventually be exported so the balancers can access it. type connectivityStateManager struct { @@ -466,24 +532,6 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { } } -// gRPC should resort to default service config when: -// * resolver service config is disabled -// * or, resolver does not return a service config or returns an invalid one. -func (cc *ClientConn) fallbackToDefaultServiceConfig(sc string) bool { - if cc.dopts.disableServiceConfig { - return true - } - // The logic below is temporary, will be removed once we change the resolver.State ServiceConfig field type. - // Right now, we assume that empty service config string means resolver does not return a config. - if sc == "" { - return true - } - // TODO: the logic below is temporary. Once we finish the logic to validate service config - // in resolver, we will replace the logic below. - _, err := parseServiceConfig(sc) - return err != nil -} - func (cc *ClientConn) updateResolverState(s resolver.State) error { cc.mu.Lock() defer cc.mu.Unlock() @@ -494,54 +542,47 @@ func (cc *ClientConn) updateResolverState(s resolver.State) error { return nil } - if cc.fallbackToDefaultServiceConfig(s.ServiceConfig) { + if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { if cc.dopts.defaultServiceConfig != nil && cc.sc == nil { cc.applyServiceConfig(cc.dopts.defaultServiceConfig) } - } else { - // TODO: the parsing logic below will be moved inside resolver. - sc, err := parseServiceConfig(s.ServiceConfig) - if err != nil { - return err - } - if cc.sc == nil || cc.sc.rawJSONString != s.ServiceConfig { - cc.applyServiceConfig(sc) - } - } - - // update the service config that will be sent to balancer. - if cc.sc != nil { - s.ServiceConfig = cc.sc.rawJSONString + } else if sc, ok := s.ServiceConfig.(*ServiceConfig); ok { + cc.applyServiceConfig(sc) } + var balCfg serviceconfig.LoadBalancingConfig if cc.dopts.balancerBuilder == nil { // Only look at balancer types and switch balancer if balancer dial // option is not set. - var isGRPCLB bool - for _, a := range s.Addresses { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } var newBalancerName string - // TODO: use new loadBalancerConfig field with appropriate priority. - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + balCfg = cc.sc.lbConfig.cfg } else { - newBalancerName = PickFirstBalancerName + var isGRPCLB bool + for _, a := range s.Addresses { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break + } + } + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } } cc.switchBalancer(newBalancerName) } else if cc.balancerWrapper == nil { // Balancer dial option was set, and this is the first time handling // resolved addresses. Build a balancer with dopts.balancerBuilder. 
+ cc.curBalancerName = cc.dopts.balancerBuilder.Name() cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) } - cc.balancerWrapper.updateResolverState(s) - cc.firstResolveEvent.Fire() + cc.balancerWrapper.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) return nil } @@ -554,7 +595,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State) error { // // Caller must hold cc.mu. func (cc *ClientConn) switchBalancer(name string) { - if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) { + if strings.EqualFold(cc.curBalancerName, name) { return } @@ -693,6 +734,8 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } + // Update connectivity state within the lock to prevent subsequent or + // concurrent calls from resetting the transport more than once. ac.updateConnectivityState(connectivity.Connecting) ac.mu.Unlock() @@ -703,7 +746,16 @@ func (ac *addrConn) connect() error { // tryUpdateAddrs tries to update ac.addrs with the new addresses list. // -// It checks whether current connected address of ac is in the new addrs list. +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// +// If ac is TransientFailure, it updates ac.addrs and returns true. The updated +// addresses will be picked up by retry in the next iteration after backoff. +// +// If ac is Shutdown or Idle, it updates ac.addrs and returns true. +// +// If ac is Ready, it checks whether current connected address of ac is in the +// new addrs list. // - If true, it updates ac.addrs and returns true. The ac will keep using // the existing connection. // - If false, it does nothing and returns false. @@ -711,17 +763,18 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) - if ac.state == connectivity.Shutdown { + if ac.state == connectivity.Shutdown || + ac.state == connectivity.TransientFailure || + ac.state == connectivity.Idle { ac.addrs = addrs return true } - // Unless we're busy reconnecting already, let's reconnect from the top of - // the list. - if ac.state != connectivity.Ready { + if ac.state == connectivity.Connecting { return false } + // ac.state is Ready, try to find the connected address. var curAddrFound bool for _, a := range addrs { if reflect.DeepEqual(ac.curAddr, a) { @@ -970,6 +1023,9 @@ func (ac *addrConn) resetTransport() { // The spec doesn't mention what should be done for multiple addresses. // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm connectDeadline := time.Now().Add(dialDuration) + + ac.updateConnectivityState(connectivity.Connecting) + ac.transport = nil ac.mu.Unlock() newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) @@ -1004,55 +1060,32 @@ func (ac *addrConn) resetTransport() { ac.mu.Lock() if ac.state == connectivity.Shutdown { - newTr.Close() ac.mu.Unlock() + newTr.Close() return } ac.curAddr = addr ac.transport = newTr ac.backoffIdx = 0 - healthCheckConfig := ac.cc.healthCheckConfig() - // LB channel health checking is only enabled when all the four requirements below are met: - // 1. it is not disabled by the user with the WithDisableHealthCheck DialOption, - // 2. the internal.HealthCheckFunc is set by importing the grpc/healthcheck package, - // 3. 
a service config with non-empty healthCheckConfig field is provided, - // 4. the current load balancer allows it. hctx, hcancel := context.WithCancel(ac.ctx) - healthcheckManagingState := false - if !ac.cc.dopts.disableHealthCheck && healthCheckConfig != nil && ac.scopts.HealthCheckEnabled { - if ac.cc.dopts.healthCheckFunc == nil { - // TODO: add a link to the health check doc in the error message. - grpclog.Error("the client side LB channel health check function has not been set.") - } else { - // TODO(deklerk) refactor to just return transport - go ac.startHealthCheck(hctx, newTr, addr, healthCheckConfig.ServiceName) - healthcheckManagingState = true - } - } - if !healthcheckManagingState { - ac.updateConnectivityState(connectivity.Ready) - } + ac.startHealthCheck(hctx) ac.mu.Unlock() // Block until the created transport is down. And when this happens, // we restart from the top of the addr list. <-reconnect.Done() hcancel() - - // Need to reconnect after a READY, the addrConn enters - // TRANSIENT_FAILURE. + // restart connecting - the top of the loop will set state to + // CONNECTING. This is against the current connectivity semantics doc, + // however it allows for graceful behavior for RPCs not yet dispatched + // - unfortunate timing would otherwise lead to the RPC failing even + // though the TRANSIENT_FAILURE state (called for by the doc) would be + // instantaneous. // - // This will set addrConn to TRANSIENT_FAILURE for a very short period - // of time, and turns CONNECTING. It seems reasonable to skip this, but - // READY-CONNECTING is not a valid transition. - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - ac.updateConnectivityState(connectivity.TransientFailure) - ac.mu.Unlock() + // Ideally we should transition to Idle here and block until there is + // RPC activity that leads to the balancer requesting a reconnect of + // the associated SubConn. } } @@ -1066,8 +1099,6 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T ac.mu.Unlock() return nil, resolver.Address{}, nil, errConnClosing } - ac.updateConnectivityState(connectivity.Connecting) - ac.transport = nil ac.cc.mu.RLock() ac.dopts.copts.KeepaliveParams = ac.cc.mkp @@ -1111,14 +1142,35 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne Authority: ac.cc.authority, } + once := sync.Once{} onGoAway := func(r transport.GoAwayReason) { ac.mu.Lock() ac.adjustParams(r) + once.Do(func() { + if ac.state == connectivity.Ready { + // Prevent this SubConn from being used for new RPCs by setting its + // state to Connecting. + // + // TODO: this should be Idle when grpc-go properly supports it. + ac.updateConnectivityState(connectivity.Connecting) + } + }) ac.mu.Unlock() reconnect.Fire() } onClose := func() { + ac.mu.Lock() + once.Do(func() { + if ac.state == connectivity.Ready { + // Prevent this SubConn from being used for new RPCs by setting its + // state to Connecting. + // + // TODO: this should be Idle when grpc-go properly supports it. + ac.updateConnectivityState(connectivity.Connecting) + } + }) + ac.mu.Unlock() close(onCloseCalled) reconnect.Fire() } @@ -1140,60 +1192,99 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne return nil, nil, err } - if ac.dopts.reqHandshake == envconfig.RequireHandshakeOn { - select { - case <-time.After(connectDeadline.Sub(time.Now())): - // We didn't get the preface in time. 
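The createTransport changes above share a sync.Once between the onGoAway and onClose callbacks so the Ready-to-Connecting demotion happens at most once, whichever callback fires first. Below is a minimal sketch of that idiom under the assumption of two racing callbacks; the conn type and state strings are illustrative only, not the addrConn implementation.

package main

import (
	"fmt"
	"sync"
)

type conn struct {
	mu    sync.Mutex
	state string
	once  sync.Once // guards the one-time demotion of the connection state
}

// demote moves a READY connection back to CONNECTING exactly once, no matter
// how many callbacks (goaway, close, ...) end up invoking it.
func (c *conn) demote(reason string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.once.Do(func() {
		if c.state == "READY" {
			c.state = "CONNECTING"
			fmt.Println("demoted due to", reason)
		}
	})
}

func main() {
	c := &conn{state: "READY"}
	c.demote("goaway") // performs the transition
	c.demote("close")  // no-op: the sync.Once already fired
	fmt.Println(c.state)
}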
- newTr.Close() - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) - return nil, nil, errors.New("timed out waiting for server handshake") - case <-prefaceReceived: - // We got the preface - huzzah! things are good. - case <-onCloseCalled: - // The transport has already closed - noop. - return nil, nil, errors.New("connection closed") - // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. - } + select { + case <-time.After(connectDeadline.Sub(time.Now())): + // We didn't get the preface in time. + newTr.Close() + grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) + return nil, nil, errors.New("timed out waiting for server handshake") + case <-prefaceReceived: + // We got the preface - huzzah! things are good. + case <-onCloseCalled: + // The transport has already closed - noop. + return nil, nil, errors.New("connection closed") + // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. } return newTr, reconnect, nil } -func (ac *addrConn) startHealthCheck(ctx context.Context, newTr transport.ClientTransport, addr resolver.Address, serviceName string) { - // Set up the health check helper functions - newStream := func() (interface{}, error) { - return ac.newClientStream(ctx, &StreamDesc{ServerStreams: true}, "/grpc.health.v1.Health/Watch", newTr) +// startHealthCheck starts the health checking stream (RPC) to watch the health +// stats of this connection if health checking is requested and configured. +// +// LB channel health checking is enabled when all requirements below are met: +// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption +// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package +// 3. a service config with non-empty healthCheckConfig field is provided +// 4. the load balancer requests it +// +// It sets addrConn to READY if the health checking stream is not started. +// +// Caller must hold ac.mu. +func (ac *addrConn) startHealthCheck(ctx context.Context) { + var healthcheckManagingState bool + defer func() { + if !healthcheckManagingState { + ac.updateConnectivityState(connectivity.Ready) + } + }() + + if ac.cc.dopts.disableHealthCheck { + return + } + healthCheckConfig := ac.cc.healthCheckConfig() + if healthCheckConfig == nil { + return + } + if !ac.scopts.HealthCheckEnabled { + return + } + healthCheckFunc := ac.cc.dopts.healthCheckFunc + if healthCheckFunc == nil { + // The health package is not imported to set health check function. + // + // TODO: add a link to the health check doc in the error message. + grpclog.Error("Health check is requested but health check function is not set.") + return + } + + healthcheckManagingState = true + + // Set up the health check helper functions. 
+ currentTr := ac.transport + newStream := func(method string) (interface{}, error) { + ac.mu.Lock() + if ac.transport != currentTr { + ac.mu.Unlock() + return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") + } + ac.mu.Unlock() + return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) } - firstReady := true - reportHealth := func(ok bool) { + setConnectivityState := func(s connectivity.State) { ac.mu.Lock() defer ac.mu.Unlock() - if ac.transport != newTr { + if ac.transport != currentTr { return } - if ok { - if firstReady { - firstReady = false - ac.curAddr = addr - } - ac.updateConnectivityState(connectivity.Ready) - } else { - ac.updateConnectivityState(connectivity.TransientFailure) - } + ac.updateConnectivityState(s) } - err := ac.cc.dopts.healthCheckFunc(ctx, newStream, reportHealth, serviceName) - if err != nil { - if status.Code(err) == codes.Unimplemented { - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled", - Severity: channelz.CtError, - }) + // Start the health checking stream. + go func() { + err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + if err != nil { + if status.Code(err) == codes.Unimplemented { + if channelz.IsOn() { + channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled", + Severity: channelz.CtError, + }) + } + grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled") + } else { + grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err) } - grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled") - } else { - grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err) } - } + }() } func (ac *addrConn) resetConnectBackoff() { diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index d9b9d5782e08..02738839dd98 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -132,7 +132,8 @@ const ( // Unavailable indicates the service is currently unavailable. // This is a most likely a transient condition and may be corrected - // by retrying with a backoff. + // by retrying with a backoff. Note that it is not always safe to retry + // non-idempotent operations. // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 88aff94596a1..8ea3d4a1dc28 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -278,24 +278,22 @@ type ChannelzSecurityValue interface { // TLSChannelzSecurityValue defines the struct that TLS protocol should return // from GetSecurityValue(), containing security info like cipher and certificate used. 
type TLSChannelzSecurityValue struct { + ChannelzSecurityValue StandardName string LocalCertificate []byte RemoteCertificate []byte } -func (*TLSChannelzSecurityValue) isChannelzSecurityValue() {} - // OtherChannelzSecurityValue defines the struct that non-TLS protocol should return // from GetSecurityValue(), which contains protocol specific security info. Note // the Value field will be sent to users of channelz requesting channel info, and // thus sensitive info should better be avoided. type OtherChannelzSecurityValue struct { + ChannelzSecurityValue Name string Value proto.Message } -func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {} - var cipherSuiteLookup = map[uint16]string{ tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index e114fecbb7b4..e8f34d0d6eaa 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -39,8 +39,12 @@ import ( // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. type dialOptions struct { - unaryInt UnaryClientInterceptor - streamInt StreamClientInterceptor + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + + chainUnaryInts []UnaryClientInterceptor + chainStreamInts []StreamClientInterceptor + cp Compressor dc Decompressor bs backoff.Strategy @@ -56,7 +60,6 @@ type dialOptions struct { balancerBuilder balancer.Builder // This is to support grpclb. resolverBuilder resolver.Builder - reqHandshake envconfig.RequireHandshakeSetting channelzParentID int64 disableServiceConfig bool disableRetry bool @@ -96,17 +99,6 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { } } -// WithWaitForHandshake blocks until the initial settings frame is received from -// the server before assigning RPCs to the connection. -// -// Deprecated: this is the default behavior, and this option will be removed -// after the 1.18 release. -func WithWaitForHandshake() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.reqHandshake = envconfig.RequireHandshakeOn - }) -} - // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is @@ -152,7 +144,8 @@ func WithInitialConnWindowSize(s int32) DialOption { // WithMaxMsgSize returns a DialOption which sets the maximum message size the // client can receive. // -// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will +// be supported throughout 1.x. func WithMaxMsgSize(s int) DialOption { return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) } @@ -168,7 +161,8 @@ func WithDefaultCallOptions(cos ...CallOption) DialOption { // WithCodec returns a DialOption which sets a codec for message marshaling and // unmarshaling. // -// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. +// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be +// supported throughout 1.x. func WithCodec(c Codec) DialOption { return WithDefaultCallOptions(CallCustomCodec(c)) } @@ -177,7 +171,7 @@ func WithCodec(c Codec) DialOption { // message compression. 
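The credentials change above swaps the hand-written isChannelzSecurityValue() marker methods for an embedded ChannelzSecurityValue interface: embedding promotes the interface's method set, so both structs still satisfy it while the unexported marker stays sealed inside the package. A small sketch of the idiom with illustrative names (marker, value); note the embedded field may remain nil as long as the promoted method is never called.

package main

import "fmt"

// marker is a "sealed" interface: its only method is unexported, so types in
// other packages can satisfy it only by embedding the interface itself.
type marker interface {
	isMarker()
}

// value satisfies marker through the embedded interface; the promoted method
// set is what matters for the type check.
type value struct {
	marker
	Name string
}

func main() {
	var m marker = value{Name: "tls"} // embedded marker stays nil
	fmt.Printf("%T satisfies marker\n", m)
}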
It has lower priority than the compressor set by the // UseCompressor CallOption. // -// Deprecated: use UseCompressor instead. +// Deprecated: use UseCompressor instead. Will be supported throughout 1.x. func WithCompressor(cp Compressor) DialOption { return newFuncDialOption(func(o *dialOptions) { o.cp = cp @@ -192,7 +186,8 @@ func WithCompressor(cp Compressor) DialOption { // message. If no compressor is registered for the encoding, an Unimplemented // status error will be returned. // -// Deprecated: use encoding.RegisterCompressor instead. +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. func WithDecompressor(dc Decompressor) DialOption { return newFuncDialOption(func(o *dialOptions) { o.dc = dc @@ -203,7 +198,7 @@ func WithDecompressor(dc Decompressor) DialOption { // Name resolver will be ignored if this DialOption is specified. // // Deprecated: use the new balancer APIs in balancer package and -// WithBalancerName. +// WithBalancerName. Will be removed in a future 1.x release. func WithBalancer(b Balancer) DialOption { return newFuncDialOption(func(o *dialOptions) { o.balancerBuilder = &balancerWrapperBuilder{ @@ -219,7 +214,8 @@ func WithBalancer(b Balancer) DialOption { // The balancer cannot be overridden by balancer option specified by service // config. // -// This is an EXPERIMENTAL API. +// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig +// instead. Will be removed in a future 1.x release. func WithBalancerName(balancerName string) DialOption { builder := balancer.Get(balancerName) if builder == nil { @@ -240,9 +236,10 @@ func withResolverBuilder(b resolver.Builder) DialOption { // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // -// Deprecated: service config should be received through name resolver, as -// specified here. -// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// Deprecated: service config should be received through name resolver or via +// WithDefaultServiceConfig, as specified at +// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be +// removed in a future 1.x release. func WithServiceConfig(c <-chan ServiceConfig) DialOption { return newFuncDialOption(func(o *dialOptions) { o.scChan = c @@ -325,7 +322,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption { // WithTimeout returns a DialOption that configures a timeout for dialing a // ClientConn initially. This is valid if and only if WithBlock() is present. // -// Deprecated: use DialContext and context.WithTimeout instead. +// Deprecated: use DialContext and context.WithTimeout instead. Will be +// supported throughout 1.x. func WithTimeout(d time.Duration) DialOption { return newFuncDialOption(func(o *dialOptions) { o.timeout = d @@ -352,7 +350,8 @@ func init() { // is returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. // -// Deprecated: use WithContextDialer instead +// Deprecated: use WithContextDialer instead. Will be supported throughout +// 1.x. func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { return WithContextDialer( func(ctx context.Context, addr string) (net.Conn, error) { @@ -414,6 +413,17 @@ func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { }) } +// WithChainUnaryInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. 
The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithUnaryInterceptor will always be prepended to the chain. +func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + // WithStreamInterceptor returns a DialOption that specifies the interceptor for // streaming RPCs. func WithStreamInterceptor(f StreamClientInterceptor) DialOption { @@ -422,6 +432,17 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption { }) } +// WithChainStreamInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithStreamInterceptor will always be prepended to the chain. +func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + // WithAuthority returns a DialOption that specifies the value to be used as the // :authority pseudo-header. This value only works with WithInsecure and has no // effect if TransportCredentials are present. @@ -440,12 +461,12 @@ func WithChannelzParentID(id int64) DialOption { }) } -// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any +// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any // service config provided by the resolver and provides a hint to the resolver // to not fetch service configs. // -// Note that, this dial option only disables service config from resolver. If -// default service config is provided, grpc will use the default service config. +// Note that this dial option only disables service config from resolver. If +// default service config is provided, gRPC will use the default service config. func WithDisableServiceConfig() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableServiceConfig = true @@ -454,8 +475,10 @@ func WithDisableServiceConfig() DialOption { // WithDefaultServiceConfig returns a DialOption that configures the default // service config, which will be used in cases where: -// 1. WithDisableServiceConfig is called. -// 2. Resolver does not return service config or if the resolver gets and invalid config. +// +// 1. WithDisableServiceConfig is also used. +// 2. Resolver does not return a service config or if the resolver returns an +// invalid service config. // // This API is EXPERIMENTAL. 
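Given the WithChainUnaryInterceptor documentation above (first interceptor outermost, last innermost, any WithUnaryInterceptor prepended), a hedged usage sketch follows; the target address and interceptor names are illustrative, and grpc.WithInsecure reflects the API of this vintage.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// named returns a unary interceptor that logs before delegating, so the
// nesting order of a chain becomes visible in the output.
func named(name string) grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply interface{},
		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		log.Printf("%s: before %s", name, method)
		return invoker(ctx, method, req, reply, cc, opts...)
	}
}

func main() {
	// "outer" wraps "inner": for each RPC the log order is outer, then inner.
	conn, err := grpc.Dial("localhost:50051", // illustrative address
		grpc.WithInsecure(),
		grpc.WithChainUnaryInterceptor(named("outer"), named("inner")),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}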
func WithDefaultServiceConfig(s string) DialOption { @@ -511,7 +534,6 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ disableRetry: !envconfig.Retry, - reqHandshake: envconfig.RequireHandshake, healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ WriteBufferSize: defaultWriteBufSize, diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index 9f3ef3a539c6..c1a8340c5bae 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -7,13 +7,13 @@ require ( github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/mock v1.1.1 github.com/golang/protobuf v1.2.0 + github.com/google/go-cmp v0.2.0 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 golang.org/x/net v0.0.0-20190311183353-d8887717615a golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be - golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a - golang.org/x/tools v0.0.0-20190311212946-11955173bddd + golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 google.golang.org/appengine v1.1.0 // indirect google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 - honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099 + honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc ) diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go index e15f04c229c4..b43746e616cf 100644 --- a/vendor/google.golang.org/grpc/health/client.go +++ b/vendor/google.golang.org/grpc/health/client.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" @@ -51,7 +52,11 @@ func init() { internal.HealthCheckFunc = clientHealthCheck } -func clientHealthCheck(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), service string) error { +const healthCheckMethod = "/grpc.health.v1.Health/Watch" + +// This function implements the protocol defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), service string) error { tryCnt := 0 retryConnection: @@ -65,7 +70,8 @@ retryConnection: if ctx.Err() != nil { return nil } - rawS, err := newStream() + setConnectivityState(connectivity.Connecting) + rawS, err := newStream(healthCheckMethod) if err != nil { continue retryConnection } @@ -73,7 +79,7 @@ retryConnection: s, ok := rawS.(grpc.ClientStream) // Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes. if !ok { - reportHealth(true) + setConnectivityState(connectivity.Ready) return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS) } @@ -89,19 +95,23 @@ retryConnection: // Reports healthy for the LBing purposes if health check is not implemented in the server. if status.Code(err) == codes.Unimplemented { - reportHealth(true) + setConnectivityState(connectivity.Ready) return err } // Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED. 
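Putting the pieces above together, client-side health checking is driven by the blank import of grpc/health (whose init registers clientHealthCheck) plus a service config whose healthCheckConfig names the service to watch. The sketch below shows one way this might be wired up via WithDefaultServiceConfig; the JSON field names follow the health-checking and service-config documents linked above, and the address, service name, and choice of round_robin are assumptions for illustration.

package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/health" // registers the client health-check function
)

func main() {
	// Watch-based health checking for service "foo", with a balancer that
	// opts in to it.
	const sc = `{
		"loadBalancingPolicy": "round_robin",
		"healthCheckConfig": {"serviceName": "foo"}
	}`
	conn, err := grpc.Dial("localhost:50051", // illustrative address
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(sc),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}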
if err != nil { - reportHealth(false) + setConnectivityState(connectivity.TransientFailure) continue retryConnection } // As a message has been received, removes the need for backoff for the next retry by reseting the try count. tryCnt = 0 - reportHealth(resp.Status == healthpb.HealthCheckResponse_SERVING) + if resp.Status == healthpb.HealthCheckResponse_SERVING { + setConnectivityState(connectivity.Ready) + } else { + setConnectivityState(connectivity.TransientFailure) + } } } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 041520d35199..f0744f9937e9 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,6 +24,7 @@ package channelz import ( + "fmt" "sort" "sync" "sync/atomic" @@ -95,9 +96,14 @@ func (d *dbWrapper) get() *channelMap { // NewChannelzStorage initializes channelz data storage and id generator. // +// This function returns a cleanup function to wait for all channelz state to be reset by the +// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests +// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen +// to remove some entity just register by the new test, since the id space is the same. +// // Note: This function is exported for testing purpose only. User should not call // it in most cases. -func NewChannelzStorage() { +func NewChannelzStorage() (cleanup func() error) { db.set(&channelMap{ topLevelChannels: make(map[int64]struct{}), channels: make(map[int64]*channel), @@ -107,6 +113,28 @@ func NewChannelzStorage() { subChannels: make(map[int64]*subChannel), }) idGen.reset() + return func() error { + var err error + cm := db.get() + if cm == nil { + return nil + } + for i := 0; i < 1000; i++ { + cm.mu.Lock() + if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { + cm.mu.Unlock() + // all things stored in the channelz map have been cleared. + return nil + } + cm.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + cm.mu.Lock() + err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) + cm.mu.Unlock() + return err + } } // GetTopChannels returns a slice of top channel's ChannelMetric, along with a diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 11be7cd08c50..3ee8740f1f93 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -25,40 +25,11 @@ import ( ) const ( - prefix = "GRPC_GO_" - retryStr = prefix + "RETRY" - requireHandshakeStr = prefix + "REQUIRE_HANDSHAKE" -) - -// RequireHandshakeSetting describes the settings for handshaking. -type RequireHandshakeSetting int - -const ( - // RequireHandshakeOn indicates to wait for handshake before considering a - // connection ready/successful. - RequireHandshakeOn RequireHandshakeSetting = iota - // RequireHandshakeOff indicates to not wait for handshake before - // considering a connection ready/successful. 
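NewChannelzStorage above now hands back a cleanup function intended for tests, which waits up to roughly ten seconds for all channelz entities to be removed before the next test starts. A sketch of how a test inside the gRPC module might use it; the internal/channelz package is not importable from outside the module, and the test name is illustrative.

package channelz_test

import (
	"testing"

	"google.golang.org/grpc/internal/channelz"
)

func TestChannelzCleanup(t *testing.T) {
	// Reset channelz state for this test and make sure everything the test
	// registers has been torn down before the function returns.
	cleanup := channelz.NewChannelzStorage()
	defer func() {
		if err := cleanup(); err != nil {
			t.Error(err)
		}
	}()

	// ... exercise code that registers channels, subchannels, or sockets ...
}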
- RequireHandshakeOff + prefix = "GRPC_GO_" + retryStr = prefix + "RETRY" ) var ( // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". Retry = strings.EqualFold(os.Getenv(retryStr), "on") - // RequireHandshake is set based upon the GRPC_GO_REQUIRE_HANDSHAKE - // environment variable. - // - // Will be removed after the 1.18 release. - RequireHandshake = RequireHandshakeOn ) - -func init() { - switch strings.ToLower(os.Getenv(requireHandshakeStr)) { - case "on": - fallthrough - default: - RequireHandshake = RequireHandshakeOn - case "off": - RequireHandshake = RequireHandshakeOff - } -} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index c1d2c690ca4e..bc1f99ac8030 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -23,6 +23,8 @@ package internal import ( "context" "time" + + "google.golang.org/grpc/connectivity" ) var ( @@ -37,10 +39,25 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second + // ParseServiceConfig is a function to parse JSON service configs into + // opaque data structures. + ParseServiceConfig func(sc string) (interface{}, error) + // StatusRawProto is exported by status/status.go. This func returns a + // pointer to the wrapped Status proto for a given status.Status without a + // call to proto.Clone(). The returned Status proto should not be mutated by + // the caller. + StatusRawProto interface{} // func (*status.Status) *spb.Status ) // HealthChecker defines the signature of the client-side LB channel health checking function. -type HealthChecker func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error +// +// The implementation is expected to create a health checking RPC stream by +// calling newStream(), watch for the health status of serviceName, and report +// it's health back by calling setConnectivityState(). +// +// The health checking protocol is defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 204ba1588bbf..b8e0aa4db275 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -23,6 +23,7 @@ import ( "fmt" "runtime" "sync" + "sync/atomic" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" @@ -84,12 +85,24 @@ func (il *itemList) isEmpty() bool { // the control buffer of transport. They represent different aspects of // control tasks, e.g., flow control, settings, streaming resetting, etc. +// maxQueuedTransportResponseFrames is the most queued "transport response" +// frames we will buffer before preventing new reads from occurring on the +// transport. These are control frames sent in response to client requests, +// such as RST_STREAM due to bad headers or settings acks. 
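The HealthChecker type above now takes a method-aware newStream callback and a setConnectivityState callback instead of the old reportHealth(bool). As a sketch of the shape of that contract, here is a trivial checker that immediately reports READY and then waits for cancellation; the healthChecker type is redeclared locally because grpc's internal package is not importable from outside the module, and alwaysHealthy is purely illustrative (the real implementation is clientHealthCheck above).

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/connectivity"
)

// healthChecker mirrors the internal.HealthChecker signature introduced above.
type healthChecker func(ctx context.Context, newStream func(string) (interface{}, error),
	setConnectivityState func(connectivity.State), serviceName string) error

// alwaysHealthy never opens the watch stream; it simply reports READY until
// the context is canceled.
var alwaysHealthy healthChecker = func(ctx context.Context, newStream func(string) (interface{}, error),
	setConnectivityState func(connectivity.State), serviceName string) error {
	setConnectivityState(connectivity.Ready)
	<-ctx.Done()
	return ctx.Err()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // return right after reporting READY
	_ = alwaysHealthy(ctx, nil, func(s connectivity.State) { fmt.Println("state:", s) }, "foo")
}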
+const maxQueuedTransportResponseFrames = 50 + +type cbItem interface { + isTransportResponseFrame() bool +} + // registerStream is used to register an incoming stream with loopy writer. type registerStream struct { streamID uint32 wq *writeQuota } +func (*registerStream) isTransportResponseFrame() bool { return false } + // headerFrame is also used to register stream on the client-side. type headerFrame struct { streamID uint32 @@ -102,6 +115,10 @@ type headerFrame struct { onOrphaned func(error) // Valid on client-side } +func (h *headerFrame) isTransportResponseFrame() bool { + return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM +} + type cleanupStream struct { streamID uint32 rst bool @@ -109,6 +126,8 @@ type cleanupStream struct { onWrite func() } +func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM + type dataFrame struct { streamID uint32 endStream bool @@ -119,27 +138,41 @@ type dataFrame struct { onEachWrite func() } +func (*dataFrame) isTransportResponseFrame() bool { return false } + type incomingWindowUpdate struct { streamID uint32 increment uint32 } +func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } + type outgoingWindowUpdate struct { streamID uint32 increment uint32 } +func (*outgoingWindowUpdate) isTransportResponseFrame() bool { + return false // window updates are throttled by thresholds +} + type incomingSettings struct { ss []http2.Setting } +func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK + type outgoingSettings struct { ss []http2.Setting } +func (*outgoingSettings) isTransportResponseFrame() bool { return false } + type incomingGoAway struct { } +func (*incomingGoAway) isTransportResponseFrame() bool { return false } + type goAway struct { code http2.ErrCode debugData []byte @@ -147,15 +180,21 @@ type goAway struct { closeConn bool } +func (*goAway) isTransportResponseFrame() bool { return false } + type ping struct { ack bool data [8]byte } +func (*ping) isTransportResponseFrame() bool { return true } + type outFlowControlSizeRequest struct { resp chan uint32 } +func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -238,6 +277,14 @@ type controlBuffer struct { consumerWaiting bool list *itemList err error + + // transportResponseFrames counts the number of queued items that represent + // the response of an action initiated by the peer. trfChan is created + // when transportResponseFrames >= maxQueuedTransportResponseFrames and is + // closed and nilled when transportResponseFrames drops below the + // threshold. Both fields are protected by mu. + transportResponseFrames int + trfChan atomic.Value // *chan struct{} } func newControlBuffer(done <-chan struct{}) *controlBuffer { @@ -248,12 +295,24 @@ func newControlBuffer(done <-chan struct{}) *controlBuffer { } } -func (c *controlBuffer) put(it interface{}) error { +// throttle blocks if there are too many incomingSettings/cleanupStreams in the +// controlbuf. 
+func (c *controlBuffer) throttle() { + ch, _ := c.trfChan.Load().(*chan struct{}) + if ch != nil { + select { + case <-*ch: + case <-c.done: + } + } +} + +func (c *controlBuffer) put(it cbItem) error { _, err := c.executeAndPut(nil, it) return err } -func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) { +func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -271,6 +330,15 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{ c.consumerWaiting = false } c.list.enqueue(it) + if it.isTransportResponseFrame() { + c.transportResponseFrames++ + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are adding the frame that puts us over the threshold; create + // a throttling channel. + ch := make(chan struct{}) + c.trfChan.Store(&ch) + } + } c.mu.Unlock() if wakeUp { select { @@ -304,7 +372,17 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { return nil, c.err } if !c.list.isEmpty() { - h := c.list.dequeue() + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Load().(*chan struct{}) + close(*ch) + c.trfChan.Store((*chan struct{})(nil)) + } + c.transportResponseFrames-- + } c.mu.Unlock() return h, nil } diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index 5ea997a7e45b..f262edd8ecda 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -149,6 +149,7 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 { n = uint32(math.MaxInt32) } f.mu.Lock() + defer f.mu.Unlock() // estSenderQuota is the receiver's view of the maximum number of bytes the sender // can send without a window update. estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) @@ -169,10 +170,8 @@ func (f *inFlow) maybeAdjust(n uint32) uint32 { // is padded; We will fallback on the current available window(at least a 1/4th of the limit). 
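The controlBuffer changes above apply back-pressure by counting queued transport-response frames and, at the threshold, publishing a channel that readers block on in throttle() until the writer drains back below the limit. The stripped-down sketch below shows the same gate idea with a plain mutex instead of atomic.Value; the gate type and the limit of 2 are illustrative, not the controlbuf implementation.

package main

import (
	"fmt"
	"sync"
)

// gate applies back-pressure once too many response items are queued: anyone
// who must not out-pace the consumer calls wait(), which blocks while the
// gate is closed.
type gate struct {
	mu      sync.Mutex
	queued  int
	limit   int
	blocked chan struct{} // non-nil while the limit is reached
}

func (g *gate) add() {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.queued++
	if g.queued == g.limit {
		g.blocked = make(chan struct{})
	}
}

func (g *gate) remove() {
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.queued == g.limit {
		close(g.blocked) // release everyone parked in wait()
		g.blocked = nil
	}
	g.queued--
}

func (g *gate) wait() {
	g.mu.Lock()
	ch := g.blocked
	g.mu.Unlock()
	if ch != nil {
		<-ch
	}
}

func main() {
	g := &gate{limit: 2}
	g.add()
	g.add()       // limit reached: wait() would now block
	go g.remove() // consumer catches up and reopens the gate
	g.wait()
	fmt.Println("reader resumed")
}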
f.delta = n } - f.mu.Unlock() return f.delta } - f.mu.Unlock() return 0 } diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index f2de84d43a81..78f9ddc3d3ab 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -24,6 +24,7 @@ package transport import ( + "bytes" "context" "errors" "fmt" @@ -347,7 +348,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace ht.stats.HandleRPC(s.ctx, inHeader) } s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, windowHandler: func(int) {}, } @@ -361,7 +362,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace for buf := make([]byte, readSize); ; { n, err := req.Body.Read(buf) if n > 0 { - s.buf.put(recvMsg{data: buf[:n:n]}) + s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) buf = buf[n:] } if err != nil { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 9dee6db61d9d..41a79c567027 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -117,6 +117,8 @@ type http2Client struct { onGoAway func(GoAwayReason) onClose func() + + bufferPool *bufferPool } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { @@ -249,6 +251,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne onGoAway: onGoAway, onClose: onClose, keepaliveEnabled: keepaliveEnabled, + bufferPool: newBufferPool(), } t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { @@ -367,6 +370,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { closeStream: func(err error) { t.CloseStream(s, err) }, + freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -437,6 +441,15 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { var k string + for k, vv := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } for _, vv := range added { for i, v := range vv { if i%2 == 0 { @@ -450,15 +463,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) } } - for k, vv := range md { - // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. 
- if isReservedHeader(k) { - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } - } } if md, ok := t.md.(*metadata.MD); ok { for k, vv := range *md { @@ -489,6 +493,9 @@ func (t *http2Client) createAudience(callHdr *CallHdr) string { } func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + if len(t.perRPCCreds) == 0 { + return nil, nil + } authData := map[string]string{} for _, c := range t.perRPCCreds { data, err := c.GetRequestMetadata(ctx, audience) @@ -509,7 +516,7 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s } func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { - callAuthData := map[string]string{} + var callAuthData map[string]string // Check if credentials.PerRPCCredentials were provided via call options. // Note: if these credentials are provided both via dial options and call // options, then both sets of credentials will be applied. @@ -521,6 +528,7 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call if err != nil { return nil, status.Errorf(codes.Internal, "transport: %v", err) } + callAuthData = make(map[string]string, len(data)) for k, v := range data { // Capital header names are illegal in HTTP/2 k = strings.ToLower(k) @@ -549,10 +557,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea s.write(recvMsg{err: err}) close(s.done) // If headerChan isn't closed, then close it. - if atomic.SwapUint32(&s.headerDone, 1) == 0 { + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { close(s.headerChan) } - } hdr := &headerFrame{ hf: headerFields, @@ -713,7 +720,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. s.write(recvMsg{err: err}) } // If headerChan isn't closed, then close it. - if atomic.SwapUint32(&s.headerDone, 1) == 0 { + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { s.noHeaders = true close(s.headerChan) } @@ -765,6 +772,9 @@ func (t *http2Client) Close() error { t.mu.Unlock() return nil } + // Call t.onClose before setting the state to closing to prevent the client + // from attempting to create new streams ASAP. + t.onClose() t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -785,7 +795,6 @@ func (t *http2Client) Close() error { } t.statsHandler.HandleConn(t.ctx, connEnd) } - t.onClose() return err } @@ -794,21 +803,21 @@ func (t *http2Client) Close() error { // stream is closed. If there are no active streams, the transport is closed // immediately. This does nothing if the transport is already draining or // closing. -func (t *http2Client) GracefulClose() error { +func (t *http2Client) GracefulClose() { t.mu.Lock() // Make sure we move to draining only from active. if t.state == draining || t.state == closing { t.mu.Unlock() - return nil + return } t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - return t.Close() + t.Close() + return } t.controlBuf.put(&incomingGoAway{}) - return nil } // Write formats the data into HTTP2 data frame(s) and sends it out. The caller @@ -946,9 +955,10 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? 
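The headerChan handling above moved to atomic.CompareAndSwapUint32 so the channel is closed exactly once even though several code paths (headers received, stream reset, transport close) may race to do it, and so other code can load the flag to see whether the close already happened. A minimal sketch of that close-once idiom; the stream type here is illustrative, not the transport's Stream.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// stream owns a channel that may be closed from several paths; the atomic
// flag makes sure only one of them actually calls close.
type stream struct {
	headerChan   chan struct{}
	headerClosed uint32
}

func (s *stream) closeHeaderChan() {
	if atomic.CompareAndSwapUint32(&s.headerClosed, 0, 1) {
		close(s.headerChan)
	}
}

func main() {
	s := &stream{headerChan: make(chan struct{})}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ { // all three paths race; exactly one close wins
		wg.Add(1)
		go func() { defer wg.Done(); s.closeHeaderChan() }()
	}
	wg.Wait()
	<-s.headerChan // already closed, so this never blocks
	fmt.Println("header channel closed exactly once")
}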
if len(f.Data()) > 0 { - data := make([]byte, len(f.Data())) - copy(data, f.Data()) - s.write(recvMsg{data: data}) + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) } } // The server has closed the stream without sending trailers. Record that @@ -973,9 +983,9 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { statusCode = codes.Unknown } if statusCode == codes.Canceled { - // Our deadline was already exceeded, and that was likely the cause of - // this cancelation. Alter the status code accordingly. - if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) { + if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { + // Our deadline was already exceeded, and that was likely the cause + // of this cancelation. Alter the status code accordingly. statusCode = codes.DeadlineExceeded } } @@ -1080,11 +1090,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { default: t.setGoAwayReason(f) close(t.goAway) - t.state = draining t.controlBuf.put(&incomingGoAway{}) - - // This has to be a new goroutine because we're still using the current goroutine to read in the transport. + // Notify the clientconn about the GOAWAY before we set the state to + // draining, to allow the client to stop attempting to create streams + // before disallowing new streams on this connection. t.onGoAway(t.goAwayReason) + t.state = draining } // All streams with IDs greater than the GoAwayId // and smaller than the previous GoAway ID should be killed. @@ -1142,26 +1153,24 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } endStream := frame.StreamEnded() atomic.StoreUint32(&s.bytesReceived, 1) - initialHeader := atomic.SwapUint32(&s.headerDone, 1) == 0 + initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 if !initialHeader && !endStream { - // As specified by RFC 7540, a HEADERS frame (and associated CONTINUATION frames) can only appear - // at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. + // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) return } state := &decodeState{} - // Initialize isGRPC value to be !initialHeader, since if a gRPC ResponseHeader has been received - // which indicates peer speaking gRPC, we are in gRPC mode. + // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. state.data.isGRPC = !initialHeader if err := state.decodeHeader(frame); err != nil { t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream) return } - var isHeader bool + isHeader := false defer func() { if t.statsHandler != nil { if isHeader { @@ -1180,10 +1189,10 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } }() - // If headers haven't been received yet. - if initialHeader { + // If headerChan hasn't been closed yet + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { if !endStream { - // Headers frame is ResponseHeader. + // HEADERS frame block carries a Response-Headers. 
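The handleRSTStream fix above only rewrites a CANCELLED status into DEADLINE_EXCEEDED when the stream's deadline has actually passed, i.e. when the deadline is not after time.Now(). A small sketch of that check in isolation; deadlineExpired is an illustrative helper, not a gRPC API.

package main

import (
	"context"
	"fmt"
	"time"
)

// deadlineExpired reports whether ctx has a deadline that already passed --
// the same test the RST_STREAM handler above now uses before rewriting a
// CANCELLED status into DEADLINE_EXCEEDED.
func deadlineExpired(ctx context.Context) bool {
	d, ok := ctx.Deadline()
	return ok && !d.After(time.Now())
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	fmt.Println(deadlineExpired(ctx)) // false: deadline is still in the future
	time.Sleep(150 * time.Millisecond)
	fmt.Println(deadlineExpired(ctx)) // true: the deadline has passed
}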
isHeader = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed @@ -1192,14 +1201,17 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if len(state.data.mdata) > 0 { s.header = state.data.mdata } - close(s.headerChan) - return + } else { + // HEADERS frame block carries a Trailers-Only. + s.noHeaders = true } - // Headers frame is Trailers-only. - s.noHeaders = true close(s.headerChan) } + if !endStream { + return + } + // if client received END_STREAM from server while stream was still active, send RST_STREAM rst := s.getState() == streamActive t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) @@ -1233,6 +1245,7 @@ func (t *http2Client) reader() { // loop to keep reading incoming messages on this transport. for { + t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() if t.keepaliveEnabled { atomic.CompareAndSwapUint32(&t.activity, 0, 1) @@ -1320,6 +1333,7 @@ func (t *http2Client) keepalive() { timer.Reset(t.kp.Time) continue } + infof("transport: closing client transport due to idleness.") t.Close() return case <-t.ctx.Done(): diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 435092e5c853..83439b5627d9 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,9 +35,11 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/keepalive" @@ -55,6 +57,9 @@ var ( // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + // statusRawProto is a function to get to the raw status proto wrapped in a + // status.Status without a proto.Clone(). + statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status) ) // http2Server implements the ServerTransport interface with HTTP2. @@ -119,6 +124,7 @@ type http2Server struct { // Fields below are for channelz metric collection. channelzID int64 // channelz unique identification number czData *channelzData + bufferPool *bufferPool } // newHTTP2Server constructs a ServerTransport based on HTTP2. 
ConnectionError is @@ -220,6 +226,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err kep: kep, initialWindowSize: iwz, czData: new(channelzData), + bufferPool: newBufferPool(), } t.controlBuf = newControlBuffer(t.ctxDone) if dynamicWindow { @@ -405,9 +412,10 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -428,6 +436,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { defer close(t.readerDone) for { + t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() atomic.StoreUint32(&t.activity, 1) if err != nil { @@ -591,9 +600,10 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - data := make([]byte, len(f.Data())) - copy(data, f.Data()) - s.write(recvMsg{data: data}) + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) } } if f.Header().Flags.Has(http2.FlagDataEndStream) { @@ -757,6 +767,10 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { return nil } +func (t *http2Server) setResetPingStrikes() { + atomic.StoreUint32(&t.resetPingStrikes, 1) +} + func (t *http2Server) writeHeaderLocked(s *Stream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. @@ -771,9 +785,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { streamID: s.id, hf: headerFields, endStream: false, - onWrite: func() { - atomic.StoreUint32(&t.resetPingStrikes, 1) - }, + onWrite: t.setResetPingStrikes, }) if !success { if err != nil { @@ -817,7 +829,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) - if p := st.Proto(); p != nil && len(p.Details) > 0 { + if p := statusRawProto(st); p != nil && len(p.Details) > 0 { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. @@ -833,9 +845,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { streamID: s.id, hf: headerFields, endStream: true, - onWrite: func() { - atomic.StoreUint32(&t.resetPingStrikes, 1) - }, + onWrite: t.setResetPingStrikes, } s.hdrMu.Unlock() success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) @@ -887,12 +897,10 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e hdr = append(hdr, data[:emptyLen]...) 
data = data[emptyLen:] df := &dataFrame{ - streamID: s.id, - h: hdr, - d: data, - onEachWrite: func() { - atomic.StoreUint32(&t.resetPingStrikes, 1) - }, + streamID: s.id, + h: hdr, + d: data, + onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { select { @@ -958,6 +966,7 @@ func (t *http2Server) keepalive() { select { case <-maxAge.C: // Close the connection after grace period. + infof("transport: closing server transport due to maximum connection age.") t.Close() // Resetting the timer so that the clean-up doesn't deadlock. maxAge.Reset(infinity) @@ -971,6 +980,7 @@ func (t *http2Server) keepalive() { continue } if pingSent { + infof("transport: closing server transport due to idleness.") t.Close() // Resetting the timer so that the clean-up doesn't deadlock. keepalive.Reset(infinity) @@ -1019,13 +1029,7 @@ func (t *http2Server) Close() error { } // deleteStream deletes the stream s from transport's active streams. -func (t *http2Server) deleteStream(s *Stream, eosReceived bool) (oldState streamState) { - oldState = s.swapState(streamDone) - if oldState == streamDone { - // If the stream was already done, return. - return oldState - } - +func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. @@ -1047,15 +1051,13 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) (oldState stream atomic.AddInt64(&t.czData.streamsFailed, 1) } } - - return oldState } // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { - oldState := t.deleteStream(s, eosReceived) - // If the stream is already closed, then don't put trailing header to controlbuf. + oldState := s.swapState(streamDone) if oldState == streamDone { + // If the stream was already done, return. return } @@ -1063,14 +1065,18 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h streamID: s.id, rst: rst, rstCode: rstCode, - onWrite: func() {}, + onWrite: func() { + t.deleteStream(s, eosReceived) + }, } t.controlBuf.put(hdr) } // closeStream clears the footprint of a stream when the stream is not needed any more. func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + s.swapState(streamDone) t.deleteStream(s, eosReceived) + t.controlBuf.put(&cleanupStream{ streamID: s.id, rst: rst, diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 7f82cbb080df..1c1d106709ac 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -22,6 +22,7 @@ package transport import ( + "bytes" "context" "errors" "fmt" @@ -39,10 +40,32 @@ import ( "google.golang.org/grpc/tap" ) +type bufferPool struct { + pool sync.Pool +} + +func newBufferPool() *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + } +} + +func (p *bufferPool) get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func (p *bufferPool) put(b *bytes.Buffer) { + p.pool.Put(b) +} + // recvMsg represents the received msg from the transport. 
All transport // protocol specific info has been removed. type recvMsg struct { - data []byte + buffer *bytes.Buffer // nil: received some data // io.EOF: stream is completed. data is nil. // other non-nil error: transport failure. data is nil. @@ -117,8 +140,9 @@ type recvBufferReader struct { ctx context.Context ctxDone <-chan struct{} // cache of ctx.Done() (for performance). recv *recvBuffer - last []byte // Stores the remaining data in the previous calls. + last *bytes.Buffer // Stores the remaining data in the previous calls. err error + freeBuffer func(*bytes.Buffer) } // Read reads the next len(p) bytes from last. If last is drained, it tries to @@ -128,10 +152,13 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) { if r.err != nil { return 0, r.err } - if r.last != nil && len(r.last) > 0 { + if r.last != nil { // Read remaining data left in last call. - copied := copy(p, r.last) - r.last = r.last[copied:] + copied, _ := r.last.Read(p) + if r.last.Len() == 0 { + r.freeBuffer(r.last) + r.last = nil + } return copied, nil } if r.closeStream != nil { @@ -157,6 +184,19 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) { // r.readAdditional acts on that message and returns the necessary error. select { case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. What + // we really want is to mark the stream as done, and return ctx error + // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() return r.readAdditional(m, p) @@ -170,8 +210,13 @@ func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error if m.err != nil { return 0, m.err } - copied := copy(p, m.data) - r.last = m.data[copied:] + copied, _ := m.buffer.Read(p) + if m.buffer.Len() == 0 { + r.freeBuffer(m.buffer) + r.last = nil + } else { + r.last = m.buffer + } return copied, nil } @@ -204,8 +249,8 @@ type Stream struct { // is used to adjust flow control, if needed. requestRead func(int) - headerChan chan struct{} // closed to indicate the end of header metadata. - headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + headerChan chan struct{} // closed to indicate the end of header metadata. + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -266,6 +311,14 @@ func (s *Stream) waitOnHeader() error { } select { case <-s.ctx.Done(): + // We prefer success over failure when reading messages because we delay + // context error in stream.Read(). To keep behavior consistent, we also + // prefer success here. + select { + case <-s.headerChan: + return nil + default: + } return ContextErr(s.ctx.Err()) case <-s.headerChan: return nil @@ -578,9 +631,12 @@ type ClientTransport interface { // is called only once. Close() error - // GracefulClose starts to tear down the transport. 
It stops accepting - // new RPCs and wait the completion of the pending RPCs. - GracefulClose() error + // GracefulClose starts to tear down the transport: the transport will stop + // accepting new RPCs and NewStream will return error. Once all streams are + // finished, the transport will close. + // + // It does not block. + GracefulClose() // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go index c99fdbef4353..f4c1c8b68947 100644 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -17,9 +17,8 @@ */ // Package naming defines the naming API and related data structures for gRPC. -// The interface is EXPERIMENTAL and may be subject to change. // -// Deprecated: please use package resolver. +// This package is deprecated: please use package resolver instead. package naming // Operation defines the corresponding operations for a name resolution change. diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index f9625496c403..45baa2ae13da 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -120,6 +120,14 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer. bp.mu.Unlock() select { case <-ctx.Done(): + if connectionErr := bp.connectionError(); connectionErr != nil { + switch ctx.Err() { + case context.DeadlineExceeded: + return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr) + case context.Canceled: + return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr) + } + } return nil, nil, ctx.Err() case <-ch: } diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index d1e38aad778b..ed05b02ed96e 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -51,14 +51,18 @@ type pickfirstBalancer struct { func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { if err != nil { - grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) + } return } if b.sc == nil { b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) if err != nil { //TODO(yuxuanli): why not change the cc state to Idle? 
- grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + if grpclog.V(2) { + grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + } return } b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) @@ -70,9 +74,13 @@ func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err er } func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + } if b.sc != sc { - grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + } return } if s == connectivity.Shutdown { diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go new file mode 100644 index 000000000000..76acbbcc93b9 --- /dev/null +++ b/vendor/google.golang.org/grpc/preloader.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// PreparedMsg is responsible for creating a Marshalled and Compressed object. +// +// This API is EXPERIMENTAL. +type PreparedMsg struct { + // Struct for preparing msg before sending them + encodedData []byte + hdr []byte + payload []byte +} + +// Encode marshalls and compresses the message using the codec and compressor for the stream. +func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { + ctx := s.Context() + rpcInfo, ok := rpcInfoFromContext(ctx) + if !ok { + return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo") + } + + // check if the context has the relevant information to prepareMsg + if rpcInfo.preloaderInfo == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") + } + if rpcInfo.preloaderInfo.codec == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") + } + + // prepare the msg + data, err := encode(rpcInfo.preloaderInfo.codec, msg) + if err != nil { + return err + } + p.encodedData = data + compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + if err != nil { + return err + } + p.hdr, p.payload = msgHeader(data, compData) + return nil +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go index 58355990779b..297492e87af4 100644 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -66,6 +66,9 @@ var ( var ( defaultResolver netResolver = net.DefaultResolver + // To prevent excessive re-resolution, we enforce a rate limit on DNS + // resolution requests. 
+ minDNSResRate = 30 * time.Second ) var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { @@ -241,7 +244,13 @@ func (d *dnsResolver) watcher() { return case <-d.t.C: case <-d.rn: + if !d.t.Stop() { + // Before resetting a timer, it should be stopped to prevent racing with + // reads on it's channel. + <-d.t.C + } } + result, sc := d.lookup() // Next lookup should happen within an interval defined by d.freq. It may be // more often due to exponential retry on empty address list. @@ -254,6 +263,16 @@ func (d *dnsResolver) watcher() { } d.cc.NewServiceConfig(sc) d.cc.NewAddress(result) + + // Sleep to prevent excessive re-resolutions. Incoming resolution requests + // will be queued in d.rn. + t := time.NewTimer(minDNSResRate) + select { + case <-t.C: + case <-d.ctx.Done(): + t.Stop() + return + } } } diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 52ec603daa77..e83da346a5cd 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -20,6 +20,10 @@ // All APIs in this package are experimental. package resolver +import ( + "google.golang.org/grpc/serviceconfig" +) + var ( // m is a map from scheme to resolver builder. m = make(map[string]Builder) @@ -100,11 +104,12 @@ type BuildOption struct { // State contains the current Resolver state relevant to the ClientConn. type State struct { - Addresses []Address // Resolved addresses for the target - ServiceConfig string // JSON representation of the service config + Addresses []Address // Resolved addresses for the target + // ServiceConfig is the parsed service config; obtained from + // serviceconfig.Parse. + ServiceConfig serviceconfig.Config // TODO: add Err error - // TODO: add ParsedServiceConfig interface{} } // ClientConn contains the callbacks for resolver to notify any updates @@ -132,6 +137,21 @@ type ClientConn interface { // Target represents a target for gRPC, as specified in: // https://github.com/grpc/grpc/blob/master/doc/naming.md. +// It is parsed from the target string that gets passed into Dial or DialContext by the user. And +// grpc passes it to the resolver and the balancer. +// +// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will +// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed +// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// +// If the target does not contain a scheme, we will apply the default scheme, and set the Target to +// be the full target string. e.g. "foo.bar" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. +// +// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the +// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target +// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. 
type Target struct { Scheme string Authority string diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index e9cef3a92b55..6934905b0f6c 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -138,19 +138,22 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { return } grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + c, err := parseServiceConfig(sc) + if err != nil { + return + } if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: sc}) + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: c}) } - ccr.curState.ServiceConfig = sc + ccr.curState.ServiceConfig = c ccr.cc.updateResolverState(ccr.curState) } func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - if s.ServiceConfig == ccr.curState.ServiceConfig && (len(ccr.curState.Addresses) == 0) == (len(s.Addresses) == 0) { - return - } var updates []string - if s.ServiceConfig != ccr.curState.ServiceConfig { + oldSC, oldOK := ccr.curState.ServiceConfig.(*ServiceConfig) + newSC, newOK := s.ServiceConfig.(*ServiceConfig) + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { updates = append(updates, "service config updated") } if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 2a595622d3cc..088c3f1b2528 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -694,14 +694,34 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf return nil } +// Information about RPC type rpcInfo struct { - failfast bool + failfast bool + preloaderInfo *compressorInfo +} + +// Information about Preloader +// Responsible for storing codec, and compressors +// If stream (s) has context s.Context which stores rpcInfo that has non nil +// pointers to codec, and compressors, then we can use preparedMsg for Async message prep +// and reuse marshalled bytes +type compressorInfo struct { + codec baseCodec + cp Compressor + comp encoding.Compressor } type rpcInfoContextKey struct{} -func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context { - return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast}) +func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ + failfast: failfast, + preloaderInfo: &compressorInfo{ + codec: codec, + cp: cp, + comp: comp, + }, + }) } func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 8115828fdf00..f064b73e555d 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -42,6 +42,7 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -56,6 +57,8 @@ const ( defaultServerMaxSendMessageSize = math.MaxInt32 ) +var statusOK = status.New(codes.OK, "") 
+ type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) // MethodDesc represents an RPC service's method specification. @@ -86,21 +89,19 @@ type service struct { // Server is a gRPC server to serve RPC requests. type Server struct { - opts options + opts serverOptions mu sync.Mutex // guards following lis map[net.Listener]bool - conns map[io.Closer]bool + conns map[transport.ServerTransport]bool serve bool drain bool cv *sync.Cond // signaled when connections close for GracefulStop m map[string]*service // service name -> service info events trace.EventLog - quit chan struct{} - done chan struct{} - quitOnce sync.Once - doneOnce sync.Once + quit *grpcsync.Event + done *grpcsync.Event channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop @@ -108,7 +109,7 @@ type Server struct { czData *channelzData } -type options struct { +type serverOptions struct { creds credentials.TransportCredentials codec baseCodec cp Compressor @@ -131,7 +132,7 @@ type options struct { maxHeaderListSize *uint32 } -var defaultServerOptions = options{ +var defaultServerOptions = serverOptions{ maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, @@ -140,7 +141,33 @@ var defaultServerOptions = options{ } // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. -type ServerOption func(*options) +type ServerOption interface { + apply(*serverOptions) +} + +// EmptyServerOption does not alter the server configuration. It can be embedded +// in another structure to build custom server options. +// +// This API is EXPERIMENTAL. +type EmptyServerOption struct{} + +func (EmptyServerOption) apply(*serverOptions) {} + +// funcServerOption wraps a function that modifies serverOptions into an +// implementation of the ServerOption interface. +type funcServerOption struct { + f func(*serverOptions) +} + +func (fdo *funcServerOption) apply(do *serverOptions) { + fdo.f(do) +} + +func newFuncServerOption(f func(*serverOptions)) *funcServerOption { + return &funcServerOption{ + f: f, + } +} // WriteBufferSize determines how much data can be batched before doing a write on the wire. // The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. @@ -148,9 +175,9 @@ type ServerOption func(*options) // Zero will disable the write buffer such that each write will be on underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.writeBufferSize = s - } + }) } // ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most @@ -159,25 +186,25 @@ func WriteBufferSize(s int) ServerOption { // Zero will disable read buffer for a connection so data framer can access the underlying // conn directly. func ReadBufferSize(s int) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s - } + }) } // InitialWindowSize returns a ServerOption that sets window size for stream. // The lower bound for window size is 64K and any value smaller than that will be ignored. 
func InitialWindowSize(s int32) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.initialWindowSize = s - } + }) } // InitialConnWindowSize returns a ServerOption that sets window size for a connection. // The lower bound for window size is 64K and any value smaller than that will be ignored. func InitialConnWindowSize(s int32) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.initialConnWindowSize = s - } + }) } // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. @@ -187,25 +214,25 @@ func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { kp.Time = time.Second } - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.keepaliveParams = kp - } + }) } // KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.keepalivePolicy = kep - } + }) } // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. // // This will override any lookups by content-subtype for Codecs registered with RegisterCodec. func CustomCodec(codec Codec) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.codec = codec - } + }) } // RPCCompressor returns a ServerOption that sets a compressor for outbound @@ -216,9 +243,9 @@ func CustomCodec(codec Codec) ServerOption { // // Deprecated: use encoding.RegisterCompressor instead. func RPCCompressor(cp Compressor) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.cp = cp - } + }) } // RPCDecompressor returns a ServerOption that sets a decompressor for inbound @@ -227,9 +254,9 @@ func RPCCompressor(cp Compressor) ServerOption { // // Deprecated: use encoding.RegisterCompressor instead. func RPCDecompressor(dc Decompressor) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.dc = dc - } + }) } // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. @@ -243,73 +270,73 @@ func MaxMsgSize(m int) ServerOption { // MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. // If this is not set, gRPC uses the default 4MB. func MaxRecvMsgSize(m int) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.maxReceiveMessageSize = m - } + }) } // MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. // If this is not set, gRPC uses the default `math.MaxInt32`. func MaxSendMsgSize(m int) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.maxSendMessageSize = m - } + }) } // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.maxConcurrentStreams = n - } + }) } // Creds returns a ServerOption that sets credentials for server connections. 
func Creds(c credentials.TransportCredentials) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.creds = c - } + }) } // UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the // server. Only one unary interceptor can be installed. The construction of multiple // interceptors (e.g., chaining) can be implemented at the caller. func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { if o.unaryInt != nil { panic("The unary server interceptor was already set and may not be reset.") } o.unaryInt = i - } + }) } // StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the // server. Only one stream interceptor can be installed. func StreamInterceptor(i StreamServerInterceptor) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { if o.streamInt != nil { panic("The stream server interceptor was already set and may not be reset.") } o.streamInt = i - } + }) } // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. func InTapHandle(h tap.ServerInHandle) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { if o.inTapHandle != nil { panic("The tap handle was already set and may not be reset.") } o.inTapHandle = h - } + }) } // StatsHandler returns a ServerOption that sets the stats handler for the server. func StatsHandler(h stats.Handler) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.statsHandler = h - } + }) } // UnknownServiceHandler returns a ServerOption that allows for adding a custom @@ -319,7 +346,7 @@ func StatsHandler(h stats.Handler) ServerOption { // The handling function has full access to the Context of the request and the // stream, and the invocation bypasses interceptors. func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.unknownStreamDesc = &StreamDesc{ StreamName: "unknown_service_handler", Handler: streamHandler, @@ -327,7 +354,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { ClientStreams: true, ServerStreams: true, } - } + }) } // ConnectionTimeout returns a ServerOption that sets the timeout for @@ -337,17 +364,17 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { // // This API is EXPERIMENTAL. func ConnectionTimeout(d time.Duration) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.connectionTimeout = d - } + }) } // MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size // of header list that the server is prepared to accept. 
func MaxHeaderListSize(s uint32) ServerOption { - return func(o *options) { + return newFuncServerOption(func(o *serverOptions) { o.maxHeaderListSize = &s - } + }) } // NewServer creates a gRPC server which has no service registered and has not @@ -355,15 +382,15 @@ func MaxHeaderListSize(s uint32) ServerOption { func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions for _, o := range opt { - o(&opts) + o.apply(&opts) } s := &Server{ lis: make(map[net.Listener]bool), opts: opts, - conns: make(map[io.Closer]bool), + conns: make(map[transport.ServerTransport]bool), m: make(map[string]*service), - quit: make(chan struct{}), - done: make(chan struct{}), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), czData: new(channelzData), } s.cv = sync.NewCond(&s.mu) @@ -530,11 +557,9 @@ func (s *Server) Serve(lis net.Listener) error { s.serveWG.Add(1) defer func() { s.serveWG.Done() - select { - // Stop or GracefulStop called; block until done and return nil. - case <-s.quit: - <-s.done - default: + if s.quit.HasFired() { + // Stop or GracefulStop called; block until done and return nil. + <-s.done.Done() } }() @@ -577,7 +602,7 @@ func (s *Server) Serve(lis net.Listener) error { timer := time.NewTimer(tempDelay) select { case <-timer.C: - case <-s.quit: + case <-s.quit.Done(): timer.Stop() return nil } @@ -587,10 +612,8 @@ func (s *Server) Serve(lis net.Listener) error { s.printf("done serving; Accept = %v", err) s.mu.Unlock() - select { - case <-s.quit: + if s.quit.HasFired() { return nil - default: } return err } @@ -611,6 +634,10 @@ func (s *Server) Serve(lis net.Listener) error { // handleRawConn forks a goroutine to handle a just-accepted connection that // has not had any I/O performed on it yet. func (s *Server) handleRawConn(rawConn net.Conn) { + if s.quit.HasFired() { + rawConn.Close() + return + } rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) conn, authInfo, err := s.useTransportAuthenticator(rawConn) if err != nil { @@ -627,14 +654,6 @@ func (s *Server) handleRawConn(rawConn net.Conn) { return } - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - conn.Close() - return - } - s.mu.Unlock() - // Finish handshaking (HTTP2) st := s.newHTTP2Transport(conn, authInfo) if st == nil { @@ -742,6 +761,9 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { // traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. // If tracing is not enabled, it returns nil. func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } tr, ok := trace.FromContext(stream.Context()) if !ok { return nil @@ -760,27 +782,27 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea return trInfo } -func (s *Server) addConn(c io.Closer) bool { +func (s *Server) addConn(st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - c.Close() + st.Close() return false } if s.drain { // Transport added after we drained our existing conns: drain it // immediately. - c.(transport.ServerTransport).Drain() + st.Drain() } - s.conns[c] = true + s.conns[st] = true return true } -func (s *Server) removeConn(c io.Closer) { +func (s *Server) removeConn(st transport.ServerTransport) { s.mu.Lock() defer s.mu.Unlock() if s.conns != nil { - delete(s.conns, c) + delete(s.conns, st) s.cv.Broadcast() } } @@ -952,10 +974,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
} if sh != nil { sh.HandleRPC(stream.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: v, - Data: d, - Length: len(d), + RecvTime: time.Now(), + Payload: v, + WireLength: payInfo.wireLength, + Data: d, + Length: len(d), }) } if binlog != nil { @@ -1051,7 +1074,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // TODO: Should we be logging if writing status failed here, like above? // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? - err = t.WriteStatus(stream, status.New(codes.OK, "")) + err = t.WriteStatus(stream, statusOK) if binlog != nil { binlog.Log(&binarylog.ServerTrailer{ Trailer: stream.Trailer(), @@ -1209,7 +1232,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.LazyLog(stringer("OK"), false) ss.mu.Unlock() } - err = t.WriteStatus(ss.s, status.New(codes.OK, "")) + err = t.WriteStatus(ss.s, statusOK) if ss.binlog != nil { ss.binlog.Log(&binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), @@ -1326,15 +1349,11 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { - s.quitOnce.Do(func() { - close(s.quit) - }) + s.quit.Fire() defer func() { s.serveWG.Wait() - s.doneOnce.Do(func() { - close(s.done) - }) + s.done.Fire() }() s.channelzRemoveOnce.Do(func() { @@ -1371,15 +1390,8 @@ func (s *Server) Stop() { // accepting new connections and RPCs and blocks until all the pending RPCs are // finished. func (s *Server) GracefulStop() { - s.quitOnce.Do(func() { - close(s.quit) - }) - - defer func() { - s.doneOnce.Do(func() { - close(s.done) - }) - }() + s.quit.Fire() + defer s.done.Fire() s.channelzRemoveOnce.Do(func() { if channelz.IsOn() { @@ -1397,8 +1409,8 @@ func (s *Server) GracefulStop() { } s.lis = nil if !s.drain { - for c := range s.conns { - c.(transport.ServerTransport).Drain() + for st := range s.conns { + st.Drain() } s.drain = true } diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 1c5227426f49..d0787f1e2a1c 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -25,8 +25,11 @@ import ( "strings" "time" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/serviceconfig" ) const maxInt = int(^uint(0) >> 1) @@ -61,6 +64,11 @@ type MethodConfig struct { retryPolicy *retryPolicy } +type lbConfig struct { + name string + cfg serviceconfig.LoadBalancingConfig +} + // ServiceConfig is provided by the service provider and contains parameters for how // clients that connect to the service should behave. // @@ -68,10 +76,18 @@ type MethodConfig struct { // through name resolver, as specified here // https://github.com/grpc/grpc/blob/master/doc/service_config.md type ServiceConfig struct { - // LB is the load balancer the service providers recommends. The balancer specified - // via grpc.WithBalancer will override this. + serviceconfig.Config + + // LB is the load balancer the service providers recommends. The balancer + // specified via grpc.WithBalancer will override this. This is deprecated; + // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig + // will be used. 
LB *string + // lbConfig is the service config's load balancing configuration. If + // lbConfig and LB are both present, lbConfig will be used. + lbConfig *lbConfig + // Methods contains a map for the methods in this service. If there is an // exact match for a method (i.e. /service/method) in the map, use the // corresponding MethodConfig. If there's no exact match, look for the @@ -233,15 +249,27 @@ type jsonMC struct { RetryPolicy *jsonRetryPolicy } +type loadBalancingConfig map[string]json.RawMessage + // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. type jsonSC struct { LoadBalancingPolicy *string + LoadBalancingConfig *[]loadBalancingConfig MethodConfig *[]jsonMC RetryThrottling *retryThrottlingPolicy HealthCheckConfig *healthCheckConfig } +func init() { + internal.ParseServiceConfig = func(sc string) (interface{}, error) { + return parseServiceConfig(sc) + } +} + func parseServiceConfig(js string) (*ServiceConfig, error) { + if len(js) == 0 { + return nil, fmt.Errorf("no JSON service config provided") + } var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { @@ -255,10 +283,38 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { healthCheckConfig: rsc.HealthCheckConfig, rawJSONString: js, } + if rsc.LoadBalancingConfig != nil { + for i, lbcfg := range *rsc.LoadBalancingConfig { + if len(lbcfg) != 1 { + err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + grpclog.Warningf(err.Error()) + return nil, err + } + var name string + var jsonCfg json.RawMessage + for name, jsonCfg = range lbcfg { + } + builder := balancer.Get(name) + if builder == nil { + continue + } + sc.lbConfig = &lbConfig{name: name} + if parser, ok := builder.(balancer.ConfigParser); ok { + var err error + sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg) + if err != nil { + return nil, fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + } + } else if string(jsonCfg) != "{}" { + grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + break + } + } + if rsc.MethodConfig == nil { return &sc, nil } - for _, m := range *rsc.MethodConfig { if m.Name == nil { continue @@ -299,11 +355,11 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { } if sc.retryThrottling != nil { - if sc.retryThrottling.MaxTokens <= 0 || - sc.retryThrottling.MaxTokens > 1000 || - sc.retryThrottling.TokenRatio <= 0 { - // Illegal throttling config; disable throttling. - sc.retryThrottling = nil + if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 { + return nil, fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt) + } + if tr := sc.retryThrottling.TokenRatio; tr <= 0 { + return nil, fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr) } } return &sc, nil diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go new file mode 100644 index 000000000000..53b27875a1ac --- /dev/null +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -0,0 +1,48 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig defines types and methods for operating on gRPC +// service configs. +// +// This package is EXPERIMENTAL. +package serviceconfig + +import ( + "google.golang.org/grpc/internal" +) + +// Config represents an opaque data structure holding a service config. +type Config interface { + isConfig() +} + +// LoadBalancingConfig represents an opaque data structure holding a load +// balancer config. +type LoadBalancingConfig interface { + isLoadBalancingConfig() +} + +// Parse parses the JSON service config provided into an internal form or +// returns an error if the config is invalid. +func Parse(ServiceConfigJSON string) (Config, error) { + c, err := internal.ParseServiceConfig(ServiceConfigJSON) + if err != nil { + return nil, err + } + return c.(Config), err +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index ed36681bb546..a1348e9b16bd 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -36,8 +36,15 @@ import ( "github.com/golang/protobuf/ptypes" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" ) +func init() { + internal.StatusRawProto = statusRawProto +} + +func statusRawProto(s *Status) *spb.Status { return s.s } + // statusError is an alias of a status proto. It implements error and Status, // and a nil statusError should never be returned by this package. type statusError spb.Status @@ -51,6 +58,17 @@ func (se *statusError) GRPCStatus() *Status { return &Status{s: (*spb.Status)(se)} } +// Is implements future error.Is functionality. +// A statusError is equivalent if the code and message are identical. +func (se *statusError) Is(target error) bool { + tse, ok := target.(*statusError) + if !ok { + return false + } + + return proto.Equal((*spb.Status)(se), (*spb.Status)(tse)) +} + // Status represents an RPC status code, message, and details. It is immutable // and should be created with New, Newf, or FromProto. type Status struct { @@ -125,7 +143,7 @@ func FromProto(s *spb.Status) *Status { // Status is returned with codes.Unknown and the original error message. 
func FromError(err error) (s *Status, ok bool) { if err == nil { - return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true + return nil, true } if se, ok := err.(interface { GRPCStatus() *Status @@ -199,7 +217,7 @@ func Code(err error) codes.Code { func FromContextError(err error) *Status { switch err { case nil: - return New(codes.OK, "") + return nil case context.DeadlineExceeded: return New(codes.DeadlineExceeded, err.Error()) case context.Canceled: diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 6e2bf51e0a09..134a624a15df 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -30,7 +30,6 @@ import ( "golang.org/x/net/trace" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" "google.golang.org/grpc/encoding" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/balancerload" @@ -245,7 +244,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth trInfo.tr.LazyLog(&trInfo.firstLine, false) ctx = trace.NewContext(ctx, trInfo.tr) } - ctx = newContextWithRPCInfo(ctx, c.failFast) + ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) sh := cc.dopts.copts.StatsHandler var beginTime time.Time if sh != nil { @@ -328,13 +327,23 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return cs, nil } -func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) error { - cs.attempt = &csAttempt{ +// newAttemptLocked creates a new attempt with a transport. +// If it succeeds, then it replaces clientStream's attempt with this new attempt. +func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { + newAttempt := &csAttempt{ cs: cs, dc: cs.cc.dopts.dc, statsHandler: sh, trInfo: trInfo, } + defer func() { + if retErr != nil { + // This attempt is not set in the clientStream, so it's finish won't + // be called. Call it here for stats and trace in case they are not + // nil. + newAttempt.finish(retErr) + } + }() if err := cs.ctx.Err(); err != nil { return toRPCErr(err) @@ -346,8 +355,9 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) er if trInfo != nil { trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) } - cs.attempt.t = t - cs.attempt.done = done + newAttempt.t = t + newAttempt.done = done + cs.attempt = newAttempt return nil } @@ -396,11 +406,18 @@ type clientStream struct { serverHeaderBinlogged bool mu sync.Mutex - firstAttempt bool // if true, transparent retry is valid - numRetries int // exclusive of transparent retry attempt(s) - numRetriesSincePushback int // retries since pushback; to reset backoff - finished bool // TODO: replace with atomic cmpxchg or sync.Once? - attempt *csAttempt // the active client stream attempt + firstAttempt bool // if true, transparent retry is valid + numRetries int // exclusive of transparent retry attempt(s) + numRetriesSincePushback int // retries since pushback; to reset backoff + finished bool // TODO: replace with atomic cmpxchg or sync.Once? + // attempt is the active client stream attempt. + // The only place where it is written is the newAttemptLocked method and this method never writes nil. + // So, attempt can be nil only inside newClientStream function when clientStream is first created. 
+ // One of the first things done after clientStream's creation, is to call newAttemptLocked which either + // assigns a non nil value to the attempt or returns an error. If an error is returned from newAttemptLocked, + // then newClientStream calls finish on the clientStream and returns. So, finish method is the only + // place where we need to check if the attempt is nil. + attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. committed bool // active attempt committed for retry? buffer []func(a *csAttempt) error // operations to replay on retry @@ -458,8 +475,8 @@ func (cs *clientStream) shouldRetry(err error) error { if cs.attempt.s != nil { <-cs.attempt.s.Done() } - if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { - // First attempt, wait-for-ready, stream unprocessed: transparently retry. + if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { + // First attempt, stream unprocessed: transparently retry. cs.firstAttempt = false return nil } @@ -677,15 +694,13 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if !cs.desc.ClientStreams { cs.sentLast = true } - data, err := encode(cs.codec, m) - if err != nil { - return err - } - compData, err := compress(data, cs.cp, cs.comp) + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) if err != nil { return err } - hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) @@ -808,11 +823,11 @@ func (cs *clientStream) finish(err error) { } if cs.attempt != nil { cs.attempt.finish(err) - } - // after functions all rely upon having a stream. - if cs.attempt.s != nil { - for _, o := range cs.opts { - o.after(cs.callInfo) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo) + } } } cs.cancel() @@ -967,19 +982,18 @@ func (a *csAttempt) finish(err error) { a.mu.Unlock() } -func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, opts ...CallOption) (_ ClientStream, err error) { - ac.mu.Lock() - if ac.transport != t { - ac.mu.Unlock() - return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") - } - // transition to CONNECTING state when an attempt starts - if ac.state != connectivity.Connecting { - ac.updateConnectivityState(connectivity.Connecting) - ac.cc.handleSubConnStateChange(ac.acbw, ac.state) - } - ac.mu.Unlock() - +// newClientStream creates a ClientStream with the specified transport, on the +// given addrConn. +// +// It's expected that the given transport is either the same one in addrConn, or +// is already closed. To avoid race, transport is specified separately, instead +// of using ac.transpot. +// +// Main difference between this and ClientConn.NewStream: +// - no retry +// - no service config (or wait for service config) +// - no tracing or stats +func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) { if t == nil { // TODO: return RPC error here? 
return nil, errors.New("transport provided is nil") @@ -987,14 +1001,6 @@ func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, metho // defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct. c := &callInfo{} - for _, o := range opts { - if err := o.before(c); err != nil { - return nil, toRPCErr(err) - } - } - c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) - c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize) - // Possible context leak: // The cancel function for the child context we create will only be called // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if @@ -1007,6 +1013,13 @@ func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, metho } }() + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize) if err := setCallInfoCodec(c); err != nil { return nil, err } @@ -1039,6 +1052,7 @@ func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, metho callHdr.Creds = c.creds } + // Use a special addrConnStream to avoid retry. as := &addrConnStream{ callHdr: callHdr, ac: ac, @@ -1150,15 +1164,13 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { if !as.desc.ClientStreams { as.sentLast = true } - data, err := encode(as.codec, m) - if err != nil { - return err - } - compData, err := compress(data, as.cp, as.comp) + + // load hdr, payload, data + hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) if err != nil { return err } - hdr, payld := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? if len(payld) > *as.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) @@ -1395,15 +1407,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { ss.t.IncrMsgSent() } }() - data, err := encode(ss.codec, m) - if err != nil { - return err - } - compData, err := compress(data, ss.cp, ss.comp) + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) if err != nil { return err } - hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? if len(payload) > ss.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) @@ -1496,3 +1506,24 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { func MethodFromServerStream(stream ServerStream) (string, bool) { return Method(stream.Context()) } + +// prepareMsg returns the hdr, payload and data +// using the compressors passed or using the +// passed preparedmsg +func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { + if preparedMsg, ok := m.(*PreparedMsg); ok { + return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + } + // The input interface is not a prepared msg. 
+ // Marshal and Compress the data at this point + data, err = encode(codec, m) + if err != nil { + return nil, nil, nil, err + } + compData, err := compress(data, cp, comp) + if err != nil { + return nil, nil, nil, err + } + hdr, payload = msgHeader(data, compData) + return hdr, payload, data, nil +} diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 092e088258da..5411a73a22e3 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.20.1" +const Version = "1.23.0" From 2f54a7cb6b95b2cf7e86994d8ac382c1a932db5b Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Mon, 26 Aug 2019 18:00:01 +0200 Subject: [PATCH 22/77] bump containerd/ttrpc 9abb3e268010ea188f4e4051f77eb5aca49315fb Signed-off-by: Sebastiaan van Stijn --- vendor.conf | 2 +- vendor/github.com/containerd/ttrpc/client.go | 6 +++--- vendor/github.com/containerd/ttrpc/services.go | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/vendor.conf b/vendor.conf index da4afee4e652..fab4b62013e0 100644 --- a/vendor.conf +++ b/vendor.conf @@ -37,7 +37,7 @@ github.com/Microsoft/go-winio v0.4.14 github.com/Microsoft/hcsshim 9e921883ac929bbe515b39793ece99ce3a9d7706 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 -github.com/containerd/ttrpc 1fb3814edf44a76e0ccf503decf726d994919a9a +github.com/containerd/ttrpc 9abb3e268010ea188f4e4051f77eb5aca49315fb github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2 gotest.tools v2.3.0 github.com/google/go-cmp v0.2.0 diff --git a/vendor/github.com/containerd/ttrpc/client.go b/vendor/github.com/containerd/ttrpc/client.go index 9db15fe69e7f..bdc01b5b0e2d 100644 --- a/vendor/github.com/containerd/ttrpc/client.go +++ b/vendor/github.com/containerd/ttrpc/client.go @@ -134,11 +134,11 @@ func (c *Client) Call(ctx context.Context, service, method string, req, resp int return err } - if cresp.Status == nil { - return errors.New("no status provided on response") + if cresp.Status != nil { + return status.ErrorProto(cresp.Status) } - return status.ErrorProto(cresp.Status) + return nil } func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error { diff --git a/vendor/github.com/containerd/ttrpc/services.go b/vendor/github.com/containerd/ttrpc/services.go index 655b2caea3e6..0eacfd79aa13 100644 --- a/vendor/github.com/containerd/ttrpc/services.go +++ b/vendor/github.com/containerd/ttrpc/services.go @@ -152,5 +152,5 @@ func convertCode(err error) codes.Code { } func fullPath(service, method string) string { - return "/" + path.Join("/", service, method) + return "/" + path.Join(service, method) } From 09b184c15a2f2a4688330ffaa9bc5b4d0c54e848 Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Mon, 26 Aug 2019 22:46:49 +0800 Subject: [PATCH 23/77] rootfs: use new ctx to cleanup instead of canceled one rootfs.CreateDiff might be canceled by context for some reason. Based on this case, the defer function should use the new ctx to do cleanup temporary snapshotter instead of the canceled one. 
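For illustration, a minimal sketch of the pattern this patch applies: derive a fresh context that carries only the namespace from the caller's ctx, so deferred removal of temporary snapshots still runs even after the caller's ctx has been canceled. The helper name withTempSnapshot and its signature are assumptions for the example, not part of the patch.

```go
package rootfsutil

import (
	"context"

	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/snapshots"
)

// withTempSnapshot creates a temporary view snapshot, runs fn, and always
// removes the snapshot afterwards, even if ctx was canceled mid-operation.
func withTempSnapshot(ctx context.Context, sn snapshots.Snapshotter, key, parent string, fn func() error) error {
	// dctx ignores ctx's cancellation but keeps its namespace, which the
	// snapshotter needs to address the right metadata scope.
	dctx := context.Background()
	if ns, ok := namespaces.Namespace(ctx); ok {
		dctx = namespaces.WithNamespace(dctx, ns)
	}

	if _, err := sn.View(ctx, key, parent); err != nil {
		return err
	}
	// Use dctx, not ctx: Remove must still run if ctx has been canceled.
	defer sn.Remove(dctx, key)

	return fn()
}
```
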
Signed-off-by: Wei Fu --- rootfs/diff.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/rootfs/diff.go b/rootfs/diff.go index b3e6ba8a33b6..f396c73ab094 100644 --- a/rootfs/diff.go +++ b/rootfs/diff.go @@ -22,6 +22,7 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/snapshots" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -31,6 +32,13 @@ import ( // the content creation and the provided snapshotter and mount differ are used // for calculating the diff. The descriptor for the layer diff is returned. func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter, d diff.Comparer, opts ...diff.Opt) (ocispec.Descriptor, error) { + // dctx is used to handle cleanup things just in case the param ctx + // has been canceled, which causes that the defer cleanup fails. + dctx := context.Background() + if ns, ok := namespaces.Namespace(ctx); ok { + dctx = namespaces.WithNamespace(dctx, ns) + } + info, err := sn.Stat(ctx, snapshotID) if err != nil { return ocispec.Descriptor{}, err @@ -41,7 +49,7 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter if err != nil { return ocispec.Descriptor{}, err } - defer sn.Remove(ctx, lowerKey) + defer sn.Remove(dctx, lowerKey) var upper []mount.Mount if info.Kind == snapshots.KindActive { @@ -55,7 +63,7 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter if err != nil { return ocispec.Descriptor{}, err } - defer sn.Remove(ctx, upperKey) + defer sn.Remove(dctx, upperKey) } return d.Compare(ctx, lower, upper, opts...) From 8266a3c5e7d7150fa323975e72f8adff0b52c11e Mon Sep 17 00:00:00 2001 From: chentanjun <2799194073@qq.com> Date: Tue, 27 Aug 2019 11:29:08 +0800 Subject: [PATCH 24/77] fix-up spelling mistake Signed-off-by: chentanjun <2799194073@qq.com> --- runtime/v2/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/v2/README.md b/runtime/v2/README.md index 51dcafafa407..76d30373fdfc 100644 --- a/runtime/v2/README.md +++ b/runtime/v2/README.md @@ -183,7 +183,7 @@ Current supported schemes for logging are: * file - Linux & Windows * npipe - Windows -Binary logging has the abilty to forward a container's STDIO to an external binary for consumption. +Binary logging has the ability to forward a container's STDIO to an external binary for consumption. A sample logging driver that forwards the container's STDOUT and STDERR to `journald` is: ```go From 6624a70d92781deff967c224b1f458b99a57bbe6 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Tue, 20 Aug 2019 22:00:48 +0000 Subject: [PATCH 25/77] runtime/opts: move WithNamespaceCgroupDeletion from containerd to its own package The cgroup dependency brings in quite a lot only for WithNamespaceCgroupDeletion, which is a namespaces.DeleteOpt. 
Signed-off-by: Tibor Vass --- cmd/ctr/commands/namespaces/namespaces_linux.go | 8 ++++---- namespaces_opts_linux.go => runtime/opts/opts_linux.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) rename namespaces_opts_linux.go => runtime/opts/opts_linux.go (98%) diff --git a/cmd/ctr/commands/namespaces/namespaces_linux.go b/cmd/ctr/commands/namespaces/namespaces_linux.go index 0300bb103db5..8562822a1ba7 100644 --- a/cmd/ctr/commands/namespaces/namespaces_linux.go +++ b/cmd/ctr/commands/namespaces/namespaces_linux.go @@ -17,15 +17,15 @@ package namespaces import ( - "github.com/containerd/containerd" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/runtime/opts" "github.com/urfave/cli" ) func deleteOpts(context *cli.Context) []namespaces.DeleteOpts { - var opts []namespaces.DeleteOpts + var delOpts []namespaces.DeleteOpts if context.Bool("cgroup") { - opts = append(opts, containerd.WithNamespaceCgroupDeletion) + delOpts = append(delOpts, opts.WithNamespaceCgroupDeletion) } - return opts + return delOpts } diff --git a/namespaces_opts_linux.go b/runtime/opts/opts_linux.go similarity index 98% rename from namespaces_opts_linux.go rename to runtime/opts/opts_linux.go index 6b8cc8f855c4..012bdb2b8321 100644 --- a/namespaces_opts_linux.go +++ b/runtime/opts/opts_linux.go @@ -14,7 +14,7 @@ limitations under the License. */ -package containerd +package opts import ( "context" From 2d8a65b1b244bba558fd0fe6bca0c80564cc4dad Mon Sep 17 00:00:00 2001 From: Kathryn Baldauf Date: Tue, 27 Aug 2019 17:07:40 -0700 Subject: [PATCH 26/77] Export shim publisher functions - Our out of tree shim would like to publish events with ttrpc. These functions should be exposed so our shim doesn't need to reimplement publisher logic. Signed-off-by: Kathryn Baldauf --- runtime/v2/shim/publisher.go | 18 +++++++++--------- runtime/v2/shim/shim.go | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/runtime/v2/shim/publisher.go b/runtime/v2/shim/publisher.go index d1f2a0c28f10..3dbd0e045bec 100644 --- a/runtime/v2/shim/publisher.go +++ b/runtime/v2/shim/publisher.go @@ -41,13 +41,13 @@ type item struct { count int } -func newPublisher(address string) (*remoteEventsPublisher, error) { +func NewPublisher(address string) (*RemoteEventsPublisher, error) { client, err := ttrpcutil.NewClient(address) if err != nil { return nil, err } - l := &remoteEventsPublisher{ + l := &RemoteEventsPublisher{ client: client, closed: make(chan struct{}), requeue: make(chan *item, queueSize), @@ -57,18 +57,18 @@ func newPublisher(address string) (*remoteEventsPublisher, error) { return l, nil } -type remoteEventsPublisher struct { +type RemoteEventsPublisher struct { client *ttrpcutil.Client closed chan struct{} closer sync.Once requeue chan *item } -func (l *remoteEventsPublisher) Done() <-chan struct{} { +func (l *RemoteEventsPublisher) Done() <-chan struct{} { return l.closed } -func (l *remoteEventsPublisher) Close() (err error) { +func (l *RemoteEventsPublisher) Close() (err error) { err = l.client.Close() l.closer.Do(func() { close(l.closed) @@ -76,7 +76,7 @@ func (l *remoteEventsPublisher) Close() (err error) { return err } -func (l *remoteEventsPublisher) processQueue() { +func (l *RemoteEventsPublisher) processQueue() { for i := range l.requeue { if i.count > maxRequeue { logrus.Errorf("evicting %s from queue because of retry count", i.ev.Topic) @@ -91,7 +91,7 @@ func (l *remoteEventsPublisher) processQueue() { } } -func (l *remoteEventsPublisher) queue(i *item) { +func (l 
*RemoteEventsPublisher) queue(i *item) { go func() { i.count++ // re-queue after a short delay @@ -100,7 +100,7 @@ func (l *remoteEventsPublisher) queue(i *item) { }() } -func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event events.Event) error { +func (l *RemoteEventsPublisher) Publish(ctx context.Context, topic string, event events.Event) error { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { return err @@ -127,7 +127,7 @@ func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event return nil } -func (l *remoteEventsPublisher) forwardRequest(ctx context.Context, req *v1.ForwardRequest) error { +func (l *RemoteEventsPublisher) forwardRequest(ctx context.Context, req *v1.ForwardRequest) error { _, err := l.client.EventsService().Forward(ctx, req) if err == nil { return nil diff --git a/runtime/v2/shim/shim.go b/runtime/v2/shim/shim.go index 2b527c48be0f..d540aa87e050 100644 --- a/runtime/v2/shim/shim.go +++ b/runtime/v2/shim/shim.go @@ -169,7 +169,7 @@ func run(id string, initFunc Init, config Config) error { ttrpcAddress := os.Getenv(ttrpcAddressEnv) - publisher, err := newPublisher(ttrpcAddress) + publisher, err := NewPublisher(ttrpcAddress) if err != nil { return err } From 372472b5f648bccb7c3045ccd5921b0082fd87bb Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Tue, 27 Aug 2019 17:42:32 -0700 Subject: [PATCH 27/77] archive: truncate modification time Signed-off-by: Tonis Tiigi --- archive/tar.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/archive/tar.go b/archive/tar.go index 3a1e77a7fc98..7ec46575647f 100644 --- a/archive/tar.go +++ b/archive/tar.go @@ -504,6 +504,12 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + // truncate timestamp for compatibility. without PAX stdlib rounds timestamps instead + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + name := p if strings.HasPrefix(name, string(filepath.Separator)) { name, err = filepath.Rel(string(filepath.Separator), name) From 92a5b08a68b9986e73976147ef26d01bd14fb97d Mon Sep 17 00:00:00 2001 From: chentanjun <2799194073@qq.com> Date: Wed, 28 Aug 2019 09:56:57 +0800 Subject: [PATCH 28/77] fix-grammar-mistake Signed-off-by: chentanjun <2799194073@qq.com> --- containers/containers.go | 2 +- docs/namespaces.md | 2 +- log/context.go | 2 +- oci/spec_opts.go | 4 ++-- platforms/platforms.go | 2 +- reports/2017-02-10.md | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/containers/containers.go b/containers/containers.go index c7ad2bfaa247..7174bbd6aa6f 100644 --- a/containers/containers.go +++ b/containers/containers.go @@ -49,7 +49,7 @@ type Container struct { // This property is required and immutable. Runtime RuntimeInfo - // Spec should carry the the runtime specification used to implement the + // Spec should carry the runtime specification used to implement the // container. // // This field is required but mutable. diff --git a/docs/namespaces.md b/docs/namespaces.md index ff939b1d082d..54ea625bb810 100644 --- a/docs/namespaces.md +++ b/docs/namespaces.md @@ -64,7 +64,7 @@ Note that currently only these two labels are used to configure the defaults and If we need to inspect containers, images, or other resources in various namespaces the `ctr` tool allows you to do this. Simply set the `--namespace,-n` flag on `ctr` to change the namespace. 
If you do not provide a namespace, `ctr` client commands -will all use the the default namespace, which is simply named "`default`". +will all use the default namespace, which is simply named "`default`". ```bash > sudo ctr -n docker tasks diff --git a/log/context.go b/log/context.go index 3fab96b85862..31f1a3ac09b1 100644 --- a/log/context.go +++ b/log/context.go @@ -30,7 +30,7 @@ var ( // messages. G = GetLogger - // L is an alias for the the standard logger. + // L is an alias for the standard logger. L = logrus.NewEntry(logrus.StandardLogger()) ) diff --git a/oci/spec_opts.go b/oci/spec_opts.go index bbd8200829b6..b165aee2c937 100644 --- a/oci/spec_opts.go +++ b/oci/spec_opts.go @@ -118,7 +118,7 @@ func WithDefaultSpecForPlatform(platform string) SpecOpts { } } -// WithSpecFromBytes loads the the spec from the provided byte slice. +// WithSpecFromBytes loads the spec from the provided byte slice. func WithSpecFromBytes(p []byte) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { *s = Spec{} // make sure spec is cleared. @@ -628,7 +628,7 @@ func WithUserID(uid uint32) SpecOpts { } // WithUsername sets the correct UID and GID for the container -// based on the the image's /etc/passwd contents. If /etc/passwd +// based on the image's /etc/passwd contents. If /etc/passwd // does not exist, or the username is not found in /etc/passwd, // it returns error. func WithUsername(username string) SpecOpts { diff --git a/platforms/platforms.go b/platforms/platforms.go index 2c2cc1102e18..d2b73ac3d399 100644 --- a/platforms/platforms.go +++ b/platforms/platforms.go @@ -130,7 +130,7 @@ type Matcher interface { // specification. The returned matcher only looks for equality based on os, // architecture and variant. // -// One may implement their own matcher if this doesn't provide the the required +// One may implement their own matcher if this doesn't provide the required // functionality. // // Applications should opt to use `Match` over directly parsing specifiers. diff --git a/reports/2017-02-10.md b/reports/2017-02-10.md index fea529ce8b06..e183f4ae86d4 100644 --- a/reports/2017-02-10.md +++ b/reports/2017-02-10.md @@ -38,7 +38,7 @@ much smaller interface! ## Bundles Bundles Bundles We spend time talking with people implementing Windows support as well as a few other users. -One the the major issues with our current approach was that bundles were a central part of our architecture. +One the major issues with our current approach was that bundles were a central part of our architecture. The content and storage subsystems would produce bundles and the execution subsystem would consume them. However, with a bundle being on the filesystem, having this concept does not work as smoothly on Windows as it would for Unix platforms. 
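For reference, the `Matcher` contract that the touched comment in `platforms/platforms.go` describes looks roughly like this in use; the specifier and platform values below are arbitrary examples.

```go
package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// Parse a platform specifier and build the default matcher for it.
	p, err := platforms.Parse("linux/amd64")
	if err != nil {
		log.Fatal(err)
	}
	m := platforms.NewMatcher(p)

	// Check whether the host's default platform satisfies the specifier.
	fmt.Println(m.Match(platforms.DefaultSpec()))
}
```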
From 97962976a4cd8a292a4340ad73fcccedf1f8b39f Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 28 Aug 2019 14:00:13 -0400 Subject: [PATCH 29/77] Update ttrpc to 92c8520ef9f86600c650dd540266a00 Signed-off-by: Michael Crosby --- vendor.conf | 2 +- vendor/github.com/containerd/ttrpc/client.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/vendor.conf b/vendor.conf index fab4b62013e0..49b9f71f5490 100644 --- a/vendor.conf +++ b/vendor.conf @@ -37,7 +37,7 @@ github.com/Microsoft/go-winio v0.4.14 github.com/Microsoft/hcsshim 9e921883ac929bbe515b39793ece99ce3a9d7706 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 -github.com/containerd/ttrpc 9abb3e268010ea188f4e4051f77eb5aca49315fb +github.com/containerd/ttrpc 92c8520ef9f86600c650dd540266a007bf03670f github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2 gotest.tools v2.3.0 github.com/google/go-cmp v0.2.0 diff --git a/vendor/github.com/containerd/ttrpc/client.go b/vendor/github.com/containerd/ttrpc/client.go index bdc01b5b0e2d..bdd1d12e7a02 100644 --- a/vendor/github.com/containerd/ttrpc/client.go +++ b/vendor/github.com/containerd/ttrpc/client.go @@ -29,6 +29,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -134,10 +135,9 @@ func (c *Client) Call(ctx context.Context, service, method string, req, resp int return err } - if cresp.Status != nil { + if cresp.Status != nil && cresp.Status.Code != int32(codes.OK) { return status.ErrorProto(cresp.Status) } - return nil } From a292bf0f248da4d2e2fef062ab24100e317fd383 Mon Sep 17 00:00:00 2001 From: Akihiro Suda Date: Thu, 29 Aug 2019 14:28:13 +0900 Subject: [PATCH 30/77] bump containerd/zfs 2ceb2dbb8154202ed1b8fd32e4ea25b491d7b251 https://github.com/containerd/zfs/compare/31af176f2ae84fe142ef2655bf7bb2aa618b3b1f...2ceb2dbb8154202ed1b8fd32e4ea25b491d7b251 Fix containerd/zfs#22 Signed-off-by: Akihiro Suda --- vendor.conf | 2 +- vendor/github.com/containerd/zfs/zfs.go | 16 +++++++++------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/vendor.conf b/vendor.conf index 49b9f71f5490..d8063b2a9fb6 100644 --- a/vendor.conf +++ b/vendor.conf @@ -80,7 +80,7 @@ k8s.io/utils c2654d5206da6b7b6ace12841e8f359bb89b443c sigs.k8s.io/yaml v1.1.0 # zfs dependencies -github.com/containerd/zfs 31af176f2ae84fe142ef2655bf7bb2aa618b3b1f +github.com/containerd/zfs 2ceb2dbb8154202ed1b8fd32e4ea25b491d7b251 github.com/mistifyio/go-zfs f784269be439d704d3dfa1906f45dd848fed2beb github.com/google/uuid v1.1.1 diff --git a/vendor/github.com/containerd/zfs/zfs.go b/vendor/github.com/containerd/zfs/zfs.go index 5b249b970f34..5a08fda6e7cf 100644 --- a/vendor/github.com/containerd/zfs/zfs.go +++ b/vendor/github.com/containerd/zfs/zfs.go @@ -298,18 +298,20 @@ func (z *snapshotter) Remove(ctx context.Context, key string) (err error) { datasetName := filepath.Join(z.dataset.Name, id) if k == snapshots.KindCommitted { - datasetName += "@" + snapshotSuffix + snapshotName := datasetName + "@" + snapshotSuffix + snapshot, err := zfs.GetDataset(snapshotName) + if err != nil { + return err + } + if err = destroySnapshot(snapshot); err != nil { + return err + } } dataset, err := zfs.GetDataset(datasetName) if err != nil { return err } - if k == snapshots.KindCommitted { - err = destroySnapshot(dataset) - } else { - err = destroy(dataset) - } - if err != nil { + if err = destroy(dataset); err 
!= nil { return err } err = t.Commit() From bc692cc59dd6f9d475e92e2bf17742fce38ca0a3 Mon Sep 17 00:00:00 2001 From: Phil Estes Date: Thu, 29 Aug 2019 10:11:03 -0400 Subject: [PATCH 31/77] Use default UNIX env when image has no environment Signed-off-by: Phil Estes --- oci/spec_opts.go | 6 +++- oci/spec_opts_unix_test.go | 72 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 1 deletion(-) create mode 100644 oci/spec_opts_unix_test.go diff --git a/oci/spec_opts.go b/oci/spec_opts.go index b165aee2c937..a18c6b214f2a 100644 --- a/oci/spec_opts.go +++ b/oci/spec_opts.go @@ -333,7 +333,11 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts { setProcess(s) if s.Linux != nil { - s.Process.Env = replaceOrAppendEnvValues(config.Env, s.Process.Env) + defaults := config.Env + if len(defaults) == 0 { + defaults = defaultUnixEnv + } + s.Process.Env = replaceOrAppendEnvValues(defaults, s.Process.Env) cmd := config.Cmd if len(args) > 0 { cmd = args diff --git a/oci/spec_opts_unix_test.go b/oci/spec_opts_unix_test.go new file mode 100644 index 000000000000..22373fb67e12 --- /dev/null +++ b/oci/spec_opts_unix_test.go @@ -0,0 +1,72 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package oci + +import ( + "context" + "testing" + + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/namespaces" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func TestWithImageConfigNoEnv(t *testing.T) { + t.Parallel() + var ( + s Spec + c = containers.Container{ID: "TestWithImageConfigNoEnv"} + ctx = namespaces.WithNamespace(context.Background(), "test") + ) + + err := populateDefaultUnixSpec(ctx, &s, c.ID) + if err != nil { + t.Fatal(err) + } + // test hack: we don't want to test the WithAdditionalGIDs portion of the image config code + s.Windows = &specs.Windows{} + + img, err := newFakeImage(ocispec.Image{ + Config: ocispec.ImageConfig{ + Entrypoint: []string{"create", "--namespace=test"}, + Cmd: []string{"", "--debug"}, + }, + }) + if err != nil { + t.Fatal(err) + } + + opts := []SpecOpts{ + WithImageConfigArgs(img, []string{"--boo", "bar"}), + } + + // verify that if an image has no environment that we get a default Unix path + expectedEnv := []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"} + + for _, opt := range opts { + if err := opt(nil, nil, nil, &s); err != nil { + t.Fatal(err) + } + } + + if err := assertEqualsStringArrays(s.Process.Env, expectedEnv); err != nil { + t.Fatal(err) + } +} From f4927a2985da06e3050008b6bb9f4837020f3e71 Mon Sep 17 00:00:00 2001 From: AllenZMC Date: Thu, 29 Aug 2019 20:54:53 +0800 Subject: [PATCH 32/77] fix mis-spelling in nvidia.go MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 常仲民@daocloud --- contrib/nvidia/nvidia.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/contrib/nvidia/nvidia.go b/contrib/nvidia/nvidia.go index a48594b00099..6a351771ffc3 100644 --- a/contrib/nvidia/nvidia.go +++ b/contrib/nvidia/nvidia.go @@ -52,7 +52,7 @@ const ( Display Capability = "display" ) -// AllCaps returns the complete list of supported Nvidia capabilties. +// AllCaps returns the complete list of supported Nvidia capabilities. func AllCaps() []Capability { return []Capability{ Compute, From 86f8be86e181767a8c6926942c1e498bbcb4ebd8 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 29 Aug 2019 11:07:03 -0400 Subject: [PATCH 33/77] Add sigprocmask to default profile Signed-off-by: Michael Crosby --- contrib/seccomp/seccomp_default.go | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/seccomp/seccomp_default.go b/contrib/seccomp/seccomp_default.go index 042052792e60..af40395de04e 100644 --- a/contrib/seccomp/seccomp_default.go +++ b/contrib/seccomp/seccomp_default.go @@ -312,6 +312,7 @@ func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { "sigaltstack", "signalfd", "signalfd4", + "sigprocmask", "sigreturn", "socket", "socketcall", From 779701b29c8ed180903de17ab1a1c52a1cdadfe0 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 29 Aug 2019 11:07:17 -0400 Subject: [PATCH 34/77] Add --seccomp flag to ctr This enables testing of containers with the default seccomp profile Signed-off-by: Michael Crosby --- cmd/ctr/commands/commands.go | 4 +++ cmd/ctr/commands/run/run_unix.go | 4 +++ contrib/seccomp/seccomp.go | 2 -- .../seccomp/seccomp_default_unsupported.go | 26 +++++++++++++++++++ 4 files changed, 34 insertions(+), 2 deletions(-) create mode 100644 contrib/seccomp/seccomp_default_unsupported.go diff --git a/cmd/ctr/commands/commands.go b/cmd/ctr/commands/commands.go index 4ad6bdff347e..b1a2b853a145 100644 --- a/cmd/ctr/commands/commands.go +++ b/cmd/ctr/commands/commands.go @@ -130,6 +130,10 @@ var ( Name: "device", Usage: "add a device to a container", }, + cli.BoolFlag{ + Name: "seccomp", + Usage: "enable the default seccomp profile", + }, } ) diff --git a/cmd/ctr/commands/run/run_unix.go b/cmd/ctr/commands/run/run_unix.go index 53d6c4f5f0b4..b3eaf926f647 100644 --- a/cmd/ctr/commands/run/run_unix.go +++ b/cmd/ctr/commands/run/run_unix.go @@ -26,6 +26,7 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/cmd/ctr/commands" "github.com/containerd/containerd/contrib/nvidia" + "github.com/containerd/containerd/contrib/seccomp" "github.com/containerd/containerd/oci" "github.com/containerd/containerd/platforms" "github.com/opencontainers/runtime-spec/specs-go" @@ -126,6 +127,9 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli if context.Bool("net-host") { opts = append(opts, oci.WithHostNamespace(specs.NetworkNamespace), oci.WithHostHostsFile, oci.WithHostResolvconf) } + if context.Bool("seccomp") { + opts = append(opts, seccomp.WithDefaultProfile()) + } joinNs := context.StringSlice("with-ns") for _, ns := range joinNs { diff --git a/contrib/seccomp/seccomp.go b/contrib/seccomp/seccomp.go index 275a4c3e6c74..b7cf1765d756 100644 --- a/contrib/seccomp/seccomp.go +++ b/contrib/seccomp/seccomp.go @@ -1,5 +1,3 @@ -// +build linux - /* Copyright The containerd Authors. diff --git a/contrib/seccomp/seccomp_default_unsupported.go b/contrib/seccomp/seccomp_default_unsupported.go new file mode 100644 index 000000000000..14d7b75e18e6 --- /dev/null +++ b/contrib/seccomp/seccomp_default_unsupported.go @@ -0,0 +1,26 @@ +// +build !linux + +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package seccomp + +import specs "github.com/opencontainers/runtime-spec/specs-go" + +// DefaultProfile defines the whitelist for the default seccomp profile. +func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { + return &specs.LinuxSeccomp{} +} From db3a71173883bcfefe901eeea9fb1dcb30cd49a7 Mon Sep 17 00:00:00 2001 From: Maksym Pavlenko Date: Fri, 30 Aug 2019 10:55:32 -0700 Subject: [PATCH 35/77] Add 'containerd config dump' subcommand Signed-off-by: Maksym Pavlenko --- cmd/containerd/command/config.go | 69 ++++++++++++++++++++------------ 1 file changed, 43 insertions(+), 26 deletions(-) diff --git a/cmd/containerd/command/config.go b/cmd/containerd/command/config.go index f9d5f9efad82..48866b002fe7 100644 --- a/cmd/containerd/command/config.go +++ b/cmd/containerd/command/config.go @@ -40,6 +40,39 @@ func (c *Config) WriteTo(w io.Writer) (int64, error) { return 0, toml.NewEncoder(w).Encode(c) } +func outputConfig(cfg *srvconfig.Config) error { + config := &Config{ + Config: cfg, + } + + // for the time being, keep the defaultConfig's version set at 1 so that + // when a config without a version is loaded from disk and has no version + // set, we assume it's a v1 config. But when generating new configs via + // this command, generate the v2 config + config.Config.Version = 2 + plugins, err := server.LoadPlugins(gocontext.Background(), config.Config) + if err != nil { + return err + } + if len(plugins) != 0 { + config.Plugins = make(map[string]interface{}) + for _, p := range plugins { + if p.Config == nil { + continue + } + config.Plugins[p.URI()] = p.Config + } + } + timeouts := timeout.All() + config.Timeouts = make(map[string]string) + for k, v := range timeouts { + config.Timeouts[k] = v.String() + } + + _, err = config.WriteTo(os.Stdout) + return err +} + var configCommand = cli.Command{ Name: "config", Usage: "information on the containerd config", @@ -48,35 +81,19 @@ var configCommand = cli.Command{ Name: "default", Usage: "see the output of the default config", Action: func(context *cli.Context) error { - config := &Config{ - Config: defaultConfig(), - } - // for the time being, keep the defaultConfig's version set at 1 so that - // when a config without a version is loaded from disk and has no version - // set, we assume it's a v1 config. 
But when generating new configs via - // this command, generate the v2 config - config.Config.Version = 2 - plugins, err := server.LoadPlugins(gocontext.Background(), config.Config) - if err != nil { + return outputConfig(defaultConfig()) + }, + }, + { + Name: "dump", + Usage: "see the output of the final main config with imported in subconfig files", + Action: func(context *cli.Context) error { + config := defaultConfig() + if err := srvconfig.LoadConfig(context.GlobalString("config"), config); err != nil && !os.IsNotExist(err) { return err } - if len(plugins) != 0 { - config.Plugins = make(map[string]interface{}) - for _, p := range plugins { - if p.Config == nil { - continue - } - config.Plugins[p.URI()] = p.Config - } - } - timeouts := timeout.All() - config.Timeouts = make(map[string]string) - for k, v := range timeouts { - config.Timeouts[k] = v.String() - } - _, err = config.WriteTo(os.Stdout) - return err + return outputConfig(config) }, }, }, From b6c811b7a4b98655a70f1e3a604c43bc008ad643 Mon Sep 17 00:00:00 2001 From: Nishchay Kumar Date: Fri, 30 Aug 2019 11:26:44 -0700 Subject: [PATCH 36/77] Update cri to 0165d516161e25e52b4ab52a404a00823f8f0ef6 Signed-off-by: Nishchay Kumar --- vendor.conf | 2 +- .../cri/pkg/server/container_create.go | 19 +++++++++---------- .../containerd/cri/pkg/server/image_pull.go | 3 ++- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/vendor.conf b/vendor.conf index d8063b2a9fb6..6233a59afffb 100644 --- a/vendor.conf +++ b/vendor.conf @@ -48,7 +48,7 @@ github.com/hashicorp/golang-lru v0.5.3 go.opencensus.io v0.22.0 # cri dependencies -github.com/containerd/cri f1d492b0cdd14e76476ee4dd024696ce3634e501 # master +github.com/containerd/cri 0165d516161e25e52b4ab52a404a00823f8f0ef6 # master github.com/containerd/go-cni 49fbd9b210f3c8ee3b7fd3cd797aabaf364627c1 github.com/containernetworking/cni v0.7.1 github.com/containernetworking/plugins v0.7.6 diff --git a/vendor/github.com/containerd/cri/pkg/server/container_create.go b/vendor/github.com/containerd/cri/pkg/server/container_create.go index d6f9779c40ad..d34e66754db0 100644 --- a/vendor/github.com/containerd/cri/pkg/server/container_create.go +++ b/vendor/github.com/containerd/cri/pkg/server/container_create.go @@ -374,11 +374,11 @@ func (c *criService) generateContainerSpec(id string, sandboxID string, sandboxP if !c.config.DisableProcMount { // Apply masked paths if specified. - // Note: If the container is privileged, then we clear any masked paths later on in the call to setOCIPrivileged() + // If the container is privileged, this will be cleared later on. specOpts = append(specOpts, oci.WithMaskedPaths(securityContext.GetMaskedPaths())) // Apply readonly paths if specified. - // Note: If the container is privileged, then we clear any readonly paths later on in the call to setOCIPrivileged() + // If the container is privileged, this will be cleared later on. specOpts = append(specOpts, oci.WithReadonlyPaths(securityContext.GetReadonlyPaths())) } @@ -577,18 +577,17 @@ func generateApparmorSpecOpts(apparmorProf string, privileged, apparmorEnabled b return nil, nil } switch apparmorProf { - case runtimeDefault: - // TODO (mikebrow): delete created apparmor default profile - return apparmor.WithDefaultProfile(appArmorDefaultProfileName), nil - case unconfinedProfile: - return nil, nil - case "": - // Based on kubernetes#51746, default apparmor profile should be applied - // for non-privileged container when apparmor is not specified. 
+ // Based on kubernetes#51746, default apparmor profile should be applied + // for when apparmor is not specified. + case runtimeDefault, "": if privileged { + // Do not set apparmor profile when container is privileged return nil, nil } + // TODO (mikebrow): delete created apparmor default profile return apparmor.WithDefaultProfile(appArmorDefaultProfileName), nil + case unconfinedProfile: + return nil, nil default: // Require and Trim default profile name prefix if !strings.HasPrefix(apparmorProf, profileNamePrefix) { diff --git a/vendor/github.com/containerd/cri/pkg/server/image_pull.go b/vendor/github.com/containerd/cri/pkg/server/image_pull.go index daa38848164d..942dcbeea7c9 100644 --- a/vendor/github.com/containerd/cri/pkg/server/image_pull.go +++ b/vendor/github.com/containerd/cri/pkg/server/image_pull.go @@ -187,7 +187,8 @@ func ParseAuth(auth *runtime.AuthConfig, host string) (string, string, error) { return user, strings.Trim(passwd, "\x00"), nil } // TODO(random-liu): Support RegistryToken. - return "", "", errors.New("invalid auth config") + // An empty auth config is valid for anonymous registry + return "", "", nil } // createImageReference creates image reference inside containerd image store. From 214b8328d21a855af67762ff3d35ff2abfd3b3ea Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Fri, 30 Aug 2019 09:42:14 +0200 Subject: [PATCH 37/77] travis: add Bionic (Ubuntu 18.04 LTS) to test matrix Using bionic (current LTS) as default, and add xenial (Ubuntu 16.04 LTS) to the matrix, to test the previous LTS release as well on master Signed-off-by: Sebastiaan van Stijn --- .travis.yml | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index c97348a48b72..fbbb6db79306 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,4 @@ -dist: xenial +dist: bionic sudo: required # setup travis so that we can run containers for integration tests services: @@ -18,6 +18,22 @@ env: - TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runtime.v1.linux TRAVIS_CGO_ENABLED=1 - TRAVIS_GOOS=darwin TRAVIS_CGO_ENABLED=0 +matrix: + include: + # On master, also test against the previous LTS (Xenial / Ubuntu 16.04 LTS) + - if: branch = master + os: linux + dist: xenial + env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v1 TRAVIS_CGO_ENABLED=1 + - if: branch = master + os: linux + dist: xenial + env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v2 TRAVIS_CGO_ENABLED=1 + - if: branch = master + os: linux + dist: xenial + env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runtime.v1.linux TRAVIS_CGO_ENABLED=1 + go_import_path: github.com/containerd/containerd addons: @@ -31,7 +47,7 @@ addons: - python-minimal - libcap-dev - libaio-dev - - libprotobuf-c0-dev + - libprotobuf-c-dev - libprotobuf-dev - socat From 94e4b68798ad34ca9e9f8c91de1995b7554a9030 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Fri, 30 Aug 2019 10:26:56 +0200 Subject: [PATCH 38/77] travis.yml: add TRAVIS_DISTRO env-var for easier identification Signed-off-by: Sebastiaan van Stijn --- .travis.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index fbbb6db79306..1adb825dc29b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,9 +13,9 @@ go: - "1.12.x" env: - - TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v1 TRAVIS_CGO_ENABLED=1 - - TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v2 TRAVIS_CGO_ENABLED=1 - - TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runtime.v1.linux TRAVIS_CGO_ENABLED=1 + - 
TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v1 TRAVIS_CGO_ENABLED=1 TRAVIS_DISTRO=bionic + - TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v2 TRAVIS_CGO_ENABLED=1 TRAVIS_DISTRO=bionic + - TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runtime.v1.linux TRAVIS_CGO_ENABLED=1 TRAVIS_DISTRO=bionic - TRAVIS_GOOS=darwin TRAVIS_CGO_ENABLED=0 matrix: @@ -24,15 +24,15 @@ matrix: - if: branch = master os: linux dist: xenial - env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v1 TRAVIS_CGO_ENABLED=1 + env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v1 TRAVIS_CGO_ENABLED=1 TRAVIS_DISTRO=xenial - if: branch = master os: linux dist: xenial - env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v2 TRAVIS_CGO_ENABLED=1 + env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v2 TRAVIS_CGO_ENABLED=1 TRAVIS_DISTRO=xenial - if: branch = master os: linux dist: xenial - env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runtime.v1.linux TRAVIS_CGO_ENABLED=1 + env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runtime.v1.linux TRAVIS_CGO_ENABLED=1 TRAVIS_DISTRO=xenial go_import_path: github.com/containerd/containerd From fa546dc3e8b8c729f59f98eb525dd032985e1725 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Sun, 1 Sep 2019 14:41:31 +0200 Subject: [PATCH 39/77] travis: don't run old Xenial LTS on pull requests The branch (for pull requests), is the base/target branch, so for pull requests against master, these would still be run. From the travis documentation: > branch (the current branch name; for pull requests: the base branch name) This patch excludes these jobs by not running them for pull request (event type=pull_request or event type=push (when rebasing a pull request)). Signed-off-by: Sebastiaan van Stijn --- .travis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 1adb825dc29b..b3ede41e4b19 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,15 +21,15 @@ env: matrix: include: # On master, also test against the previous LTS (Xenial / Ubuntu 16.04 LTS) - - if: branch = master + - if: branch = master AND type NOT IN (push, pull_request) os: linux dist: xenial env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v1 TRAVIS_CGO_ENABLED=1 TRAVIS_DISTRO=xenial - - if: branch = master + - if: branch = master AND type NOT IN (push, pull_request) os: linux dist: xenial env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v2 TRAVIS_CGO_ENABLED=1 TRAVIS_DISTRO=xenial - - if: branch = master + - if: branch = master AND type NOT IN (push, pull_request) os: linux dist: xenial env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runtime.v1.linux TRAVIS_CGO_ENABLED=1 TRAVIS_DISTRO=xenial From dd24d76a1383333b316e340acfa8b62bd2621e63 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Sun, 1 Sep 2019 22:11:20 -0700 Subject: [PATCH 40/77] Fix potential containerd panic during graceful shutdown. Signed-off-by: Lantao Liu --- cmd/containerd/command/main_unix.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/containerd/command/main_unix.go b/cmd/containerd/command/main_unix.go index 90de40eaa934..c9081eeef3e5 100644 --- a/cmd/containerd/command/main_unix.go +++ b/cmd/containerd/command/main_unix.go @@ -58,6 +58,7 @@ func handleSignals(ctx context.Context, signals chan os.Signal, serverC chan *se } server.Stop() close(done) + return } } } From c410f0eaef7eae2d73d51387bdf30834e9b72ec9 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Mon, 2 Sep 2019 00:30:14 -0700 Subject: [PATCH 41/77] Fix potential panic for task in unknown state. 
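The extra `return` added to the signal handler in the graceful-shutdown fix above is the whole fix: once `done` is closed, the loop must not run again, or a second signal would close the channel twice and panic. A stripped-down sketch of that shape, with names and signal set that are illustrative rather than the daemon's actual handler:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// handleSignals mirrors the shape of the daemon's handler: after the first
// terminating signal closes done, the goroutine must return, otherwise a
// later signal would reach close(done) again and panic.
func handleSignals(signals chan os.Signal, done chan struct{}) {
	go func() {
		for s := range signals {
			fmt.Println("received signal:", s)
			// server shutdown work would go here
			close(done)
			return
		}
	}()
}

func main() {
	signals := make(chan os.Signal, 2048)
	done := make(chan struct{})
	signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT)
	handleSignals(signals, done)
	<-done
}
```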
Signed-off-by: Lantao Liu --- cio/io_unix.go | 22 ++++++++++++---------- container.go | 5 ++++- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/cio/io_unix.go b/cio/io_unix.go index eb2ada80bfee..42d320933bf8 100644 --- a/cio/io_unix.go +++ b/cio/io_unix.go @@ -72,17 +72,19 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) { } var wg = &sync.WaitGroup{} - wg.Add(1) - go func() { - p := bufPool.Get().(*[]byte) - defer bufPool.Put(p) - - io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p) - pipes.Stdout.Close() - wg.Done() - }() + if fifos.Stdout != "" { + wg.Add(1) + go func() { + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p) + pipes.Stdout.Close() + wg.Done() + }() + } - if !fifos.Terminal { + if !fifos.Terminal && fifos.Stderr != "" { wg.Add(1) go func() { p := bufPool.Get().(*[]byte) diff --git a/container.go b/container.go index 46d51ecd919a..fd880d0e0a38 100644 --- a/container.go +++ b/container.go @@ -25,6 +25,7 @@ import ( "github.com/containerd/containerd/api/services/tasks/v1" "github.com/containerd/containerd/api/types" + tasktypes "github.com/containerd/containerd/api/types/task" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" @@ -382,7 +383,9 @@ func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, er return nil, err } var i cio.IO - if ioAttach != nil { + if ioAttach != nil && response.Process.Status != tasktypes.StatusUnknown { + // Do not attach IO for task in unknown state, because there + // are no fifo paths anyway. if i, err = attachExistingIO(response, ioAttach); err != nil { return nil, err } From 555cb31fd932d783f7a65dfe8706162856d97cd8 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Sat, 31 Aug 2019 18:04:08 -0700 Subject: [PATCH 42/77] Support configurable default platform in the client. 
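A sketch of how the new client option is intended to be used, based on the diff that follows; the socket path, namespace, and image reference are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/platforms"
)

func main() {
	// Pin the client to linux/arm64 instead of the build platform default.
	client, err := containerd.New("/run/containerd/containerd.sock",
		containerd.WithDefaultPlatform(platforms.Only(platforms.MustParse("linux/arm64"))))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")
	if _, err := client.Pull(ctx, "docker.io/library/alpine:latest", containerd.WithPullUnpack); err != nil {
		log.Fatal(err)
	}
}
```

As the diff below shows, once the matcher is set on the client it is used by Pull, Import, Install and the snapshot-related container opts in place of platforms.Default().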
Signed-off-by: Lantao Liu --- client.go | 7 +++++++ client_opts.go | 19 ++++++++++++++----- container_opts.go | 3 +-- container_opts_unix.go | 3 +-- container_restore_opts.go | 3 +-- image.go | 2 +- import.go | 2 +- install.go | 3 +-- pull.go | 2 +- 9 files changed, 28 insertions(+), 16 deletions(-) diff --git a/client.go b/client.go index 5299179c6397..99141e2db52c 100644 --- a/client.go +++ b/client.go @@ -99,6 +99,12 @@ func New(address string, opts ...ClientOpt) (*Client, error) { c.runtime = defaults.DefaultRuntime } + if copts.defaultPlatform != nil { + c.platform = copts.defaultPlatform + } else { + c.platform = platforms.Default() + } + if copts.services != nil { c.services = *copts.services } @@ -193,6 +199,7 @@ type Client struct { conn *grpc.ClientConn runtime string defaultns string + platform platforms.MatchComparer connector func() (*grpc.ClientConn, error) } diff --git a/client_opts.go b/client_opts.go index 86735953968d..6f485c18dcb1 100644 --- a/client_opts.go +++ b/client_opts.go @@ -26,11 +26,12 @@ import ( ) type clientOpts struct { - defaultns string - defaultRuntime string - services *services - dialOptions []grpc.DialOption - timeout time.Duration + defaultns string + defaultRuntime string + defaultPlatform platforms.MatchComparer + services *services + dialOptions []grpc.DialOption + timeout time.Duration } // ClientOpt allows callers to set options on the containerd client @@ -55,6 +56,14 @@ func WithDefaultRuntime(rt string) ClientOpt { } } +// WithDefaultPlatform sets the default platform matcher on the client +func WithDefaultPlatform(platform platforms.MatchComparer) ClientOpt { + return func(c *clientOpts) error { + c.defaultPlatform = platform + return nil + } +} + // WithDialOpts allows grpc.DialOptions to be set on the connection func WithDialOpts(opts []grpc.DialOption) ClientOpt { return func(c *clientOpts) error { diff --git a/container_opts.go b/container_opts.go index 23e77492ee36..8954840235d4 100644 --- a/container_opts.go +++ b/container_opts.go @@ -22,7 +22,6 @@ import ( "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/oci" - "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/snapshots" "github.com/containerd/typeurl" "github.com/gogo/protobuf/types" @@ -190,7 +189,7 @@ func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Conta // root filesystem in read-only mode func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { - diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) + diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform) if err != nil { return err } diff --git a/container_opts_unix.go b/container_opts_unix.go index af52d042209c..b109a10ecb4b 100644 --- a/container_opts_unix.go +++ b/container_opts_unix.go @@ -28,7 +28,6 @@ import ( "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/platforms" "github.com/opencontainers/image-spec/identity" ) @@ -45,7 +44,7 @@ func WithRemappedSnapshotView(id string, i Image, uid, gid uint32) NewContainerO func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { - diffIDs, err := 
i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) + diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform) if err != nil { return err } diff --git a/container_restore_opts.go b/container_restore_opts.go index 4f251c4a6b38..03722dba1a54 100644 --- a/container_restore_opts.go +++ b/container_restore_opts.go @@ -22,7 +22,6 @@ import ( "github.com/containerd/containerd/containers" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" - "github.com/containerd/containerd/platforms" "github.com/gogo/protobuf/proto" ptypes "github.com/gogo/protobuf/types" "github.com/opencontainers/image-spec/identity" @@ -58,7 +57,7 @@ func WithRestoreImage(ctx context.Context, id string, client *Client, checkpoint return err } - diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) + diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform) if err != nil { return err } diff --git a/image.go b/image.go index f4c66da9f94b..9ef09ac2f882 100644 --- a/image.go +++ b/image.go @@ -110,7 +110,7 @@ func NewImage(client *Client, i images.Image) Image { return &image{ client: client, i: i, - platform: platforms.Default(), + platform: client.platform, } } diff --git a/import.go b/import.go index fc79f751807c..6080161f8412 100644 --- a/import.go +++ b/import.go @@ -125,7 +125,7 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt } var platformMatcher = platforms.All if !iopts.allPlatforms { - platformMatcher = platforms.Default() + platformMatcher = c.platform } var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { diff --git a/install.go b/install.go index 4545d4554e81..5b8b735ded51 100644 --- a/install.go +++ b/install.go @@ -27,7 +27,6 @@ import ( "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" - "github.com/containerd/containerd/platforms" "github.com/pkg/errors" ) @@ -43,7 +42,7 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts) } var ( cs = image.ContentStore() - platform = platforms.Default() + platform = c.platform ) manifest, err := images.Manifest(ctx, cs, image.Target(), platform) if err != nil { diff --git a/pull.go b/pull.go index fe9f6abda5b2..2520639dff56 100644 --- a/pull.go +++ b/pull.go @@ -44,7 +44,7 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima if len(pullCtx.Platforms) > 1 { return nil, errors.New("cannot pull multiplatform image locally, try Fetch") } else if len(pullCtx.Platforms) == 0 { - pullCtx.PlatformMatcher = platforms.Default() + pullCtx.PlatformMatcher = c.platform } else { p, err := platforms.Parse(pullCtx.Platforms[0]) if err != nil { From cd79e0edfe815875f33609e839caff2a4b914b55 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Wed, 4 Sep 2019 01:29:03 +0200 Subject: [PATCH 43/77] travis: fix Xenial tests not being run on master Signed-off-by: Sebastiaan van Stijn --- .travis.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index b3ede41e4b19..2e0e1452a3f6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,16 +20,16 @@ env: matrix: include: - # On master, also test against the previous LTS (Xenial / Ubuntu 16.04 LTS) - - if: branch = master AND type NOT IN (push, pull_request) + # Skip testing previous LTS (Xenial / Ubuntu 16.04 LTS) on pull requests + 
- if: type != pull_request os: linux dist: xenial env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v1 TRAVIS_CGO_ENABLED=1 TRAVIS_DISTRO=xenial - - if: branch = master AND type NOT IN (push, pull_request) + - if: type != pull_request os: linux dist: xenial env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v2 TRAVIS_CGO_ENABLED=1 TRAVIS_DISTRO=xenial - - if: branch = master AND type NOT IN (push, pull_request) + - if: type != pull_request os: linux dist: xenial env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runtime.v1.linux TRAVIS_CGO_ENABLED=1 TRAVIS_DISTRO=xenial From 01f7265892f0bd7df1448275499ed6afb9160b66 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 3 Sep 2019 17:12:03 -0700 Subject: [PATCH 44/77] Support v1 configurations for config dump Signed-off-by: Derek McGowan --- cmd/containerd/command/config.go | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/cmd/containerd/command/config.go b/cmd/containerd/command/config.go index 48866b002fe7..1e5710d4297b 100644 --- a/cmd/containerd/command/config.go +++ b/cmd/containerd/command/config.go @@ -45,11 +45,6 @@ func outputConfig(cfg *srvconfig.Config) error { Config: cfg, } - // for the time being, keep the defaultConfig's version set at 1 so that - // when a config without a version is loaded from disk and has no version - // set, we assume it's a v1 config. But when generating new configs via - // this command, generate the v2 config - config.Config.Version = 2 plugins, err := server.LoadPlugins(gocontext.Background(), config.Config) if err != nil { return err @@ -60,15 +55,31 @@ func outputConfig(cfg *srvconfig.Config) error { if p.Config == nil { continue } - config.Plugins[p.URI()] = p.Config + + pc, err := config.Decode(p) + if err != nil { + return err + } + + config.Plugins[p.URI()] = pc } } + timeouts := timeout.All() config.Timeouts = make(map[string]string) for k, v := range timeouts { config.Timeouts[k] = v.String() } + // for the time being, keep the defaultConfig's version set at 1 so that + // when a config without a version is loaded from disk and has no version + // set, we assume it's a v1 config. But when generating new configs via + // this command, generate the v2 config + config.Config.Version = 2 + + // remove overridden Plugins type to avoid duplication in output + config.Config.Plugins = nil + _, err = config.WriteTo(os.Stdout) return err } From a4482d9a6f230091274d45afa0ea34d5cfe6c4d9 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 3 Sep 2019 17:18:38 -0700 Subject: [PATCH 45/77] Fix proxy plugin config validation Proxy plugins are keyed only on the identifier, the type is specified within the proxy plugin configuration which maps to the full URI. The proxy plugin configuration is not passed to the plugin for configuration like other plugins. 
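Concretely, a proxy plugin entry in the daemon config is keyed by a plain identifier, with the kind carried in the `type` field; a hypothetical snapshotter registration (the name and socket path are examples only):

```toml
[proxy_plugins]
  # The table key ("customsnapshot") is just an identifier; the plugin kind
  # comes from "type" rather than from an io.containerd.x.vx URI.
  [proxy_plugins.customsnapshot]
    type = "snapshot"
    address = "/run/customsnapshot/snapshot.sock"
```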
Signed-off-by: Derek McGowan --- services/server/config/config.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/services/server/config/config.go b/services/server/config/config.go index 5464b6e7d0db..c6e57275341a 100644 --- a/services/server/config/config.go +++ b/services/server/config/config.go @@ -107,11 +107,6 @@ func (c *Config) ValidateV2() error { return errors.Errorf("invalid plugin key URI %q expect io.containerd.x.vx", p) } } - for p := range c.ProxyPlugins { - if len(strings.Split(p, ".")) < 4 { - return errors.Errorf("invalid proxy plugin key URI %q expect io.containerd.x.vx", p) - } - } return nil } From d6a6e0b8bc858ea11d31561f2a88bff5622ffca0 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Tue, 3 Sep 2019 18:42:41 -0700 Subject: [PATCH 46/77] Support foreign and encrypted layers in the unpacker. Signed-off-by: Lantao Liu --- unpacker.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/unpacker.go b/unpacker.go index 1dc8c148128a..790c06c8dd99 100644 --- a/unpacker.go +++ b/unpacker.go @@ -229,8 +229,11 @@ func (u *unpacker) handlerWrapper(uctx context.Context, unpacks *int32) (func(im return u.unpack(uctx, desc, l) }) } - case images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2Layer, - ocispec.MediaTypeImageLayerGzip, ocispec.MediaTypeImageLayer: + case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, + images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip, + ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip, + ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip, + images.MediaTypeDockerSchema2LayerEnc, images.MediaTypeDockerSchema2LayerGzipEnc: lock.Lock() update := !schema1 lock.Unlock() From 8788af7f8d26aab8a2b6a083df74ea6c250218c9 Mon Sep 17 00:00:00 2001 From: chentanjun <2799194073@qq.com> Date: Wed, 4 Sep 2019 21:43:00 +0800 Subject: [PATCH 47/77] modify-document-duplicate-word Signed-off-by: chentanjun <2799194073@qq.com> --- oci/spec_test.go | 2 +- platforms/database.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/oci/spec_test.go b/oci/spec_test.go index 7e04b960e805..364734927330 100644 --- a/oci/spec_test.go +++ b/oci/spec_test.go @@ -63,7 +63,7 @@ func TestGenerateSpec(t *testing.T) { } } else { if s.Windows == nil { - t.Fatal("Windows section of spec not filled on on Windows platform") + t.Fatal("Windows section of spec not filled in on Windows platform") } } diff --git a/platforms/database.go b/platforms/database.go index 8e85448ed028..3a312cff6ebe 100644 --- a/platforms/database.go +++ b/platforms/database.go @@ -28,7 +28,7 @@ func isLinuxOS(os string) bool { return os == "linux" } -// These function are generated from from https://golang.org/src/go/build/syslist.go. +// These function are generated from https://golang.org/src/go/build/syslist.go. // // We use switch statements because they are slightly faster than map lookups // and use a little less memory. From 3d1fdacccf503d5a084e6b1db1277c665817fb07 Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Thu, 5 Sep 2019 14:50:04 +0800 Subject: [PATCH 48/77] config_test: sort result before check config imports is in random order after LoadConfig call so that the testing check should be done after sort.Strings(out.Imports). 
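The same test pattern in isolation, for reference; the file names match the ones used by the test above, and the rest is a self-contained sketch.

```go
package config

import (
	"reflect"
	"sort"
	"testing"
)

// TestImportsIgnoreOrder shows the pattern applied above: when a slice's
// order is not guaranteed, sort it before asserting equality.
func TestImportsIgnoreOrder(t *testing.T) {
	got := []string{"data2.toml", "data1.toml"} // LoadConfig may return either order
	want := []string{"data1.toml", "data2.toml"}

	sort.Strings(got)
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("imports mismatch: got %v, want %v", got, want)
	}
}
```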
Signed-off-by: Wei Fu --- services/server/config/config_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/services/server/config/config_test.go b/services/server/config/config_test.go index 8660105d46af..3091abc5d214 100644 --- a/services/server/config/config_test.go +++ b/services/server/config/config_test.go @@ -20,6 +20,7 @@ import ( "io/ioutil" "os" "path/filepath" + "sort" "testing" "gotest.tools/assert" @@ -175,6 +176,7 @@ imports = ["data1.toml", "data2.toml"] assert.Equal(t, "/var/lib/containerd", out.Root) assert.DeepEqual(t, []string{"io.containerd.v1.xyz"}, out.DisabledPlugins) + sort.Strings(out.Imports) assert.DeepEqual(t, []string{ filepath.Join(tempDir, "data1.toml"), filepath.Join(tempDir, "data2.toml"), From c8cb864ce026316e68a149be9fbcdb0c68afab9d Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Thu, 5 Sep 2019 14:15:43 +0200 Subject: [PATCH 49/77] platforms: update known OS and arch values Update the lists in isKnownOS and isKnownOS according to goosList and goarchList taken from Go 1.13, see https://github.com/golang/go/blob/release-branch.go1.13/src/go/build/syslist.go Signed-off-by: Tobias Klauser --- platforms/database.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/platforms/database.go b/platforms/database.go index 8e85448ed028..1cbcf42797c9 100644 --- a/platforms/database.go +++ b/platforms/database.go @@ -38,7 +38,7 @@ func isLinuxOS(os string) bool { // The OS value should be normalized before calling this function. func isKnownOS(os string) bool { switch os { - case "android", "darwin", "dragonfly", "freebsd", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": return true } return false @@ -60,7 +60,7 @@ func isArmArch(arch string) bool { // The arch value should be normalized before being passed to this function. func isKnownArch(arch string) bool { switch arch { - case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "s390", "s390x", "sparc", "sparc64": + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": return true } return false From 5c576b026960fac507f210a585870588ac91d912 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 5 Sep 2019 11:35:59 -0700 Subject: [PATCH 50/77] Update and simplify mailmap Add new contributors for 1.3. Simplify mailmap by removing unnecessary commit names. 
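Returning to the platforms table update above: with the added entries, specifiers built from newer GOOS/GOARCH values should now parse instead of being rejected as unknown. A quick check, with sample specifiers that are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// These use OS/arch values added to the known lists in this change.
	for _, s := range []string{"linux/riscv64", "js/wasm", "illumos/amd64"} {
		p, err := platforms.Parse(s)
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		fmt.Println(s, "->", platforms.Format(p))
	}
}
```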
Signed-off-by: Derek McGowan --- .mailmap | 96 +++++++++++++++++++++++++++++--------------------------- 1 file changed, 49 insertions(+), 47 deletions(-) diff --git a/.mailmap b/.mailmap index 37b8d00a535b..9fa2a26564a2 100644 --- a/.mailmap +++ b/.mailmap @@ -1,47 +1,49 @@ -Abhinandan Prativadi abhi -Abhinandan Prativadi Abhinandan Prativadi -Akihiro Suda Akihiro Suda -Akihiro Suda Akihiro Suda -Andrei Vagin Andrei Vagin -Andrey Kolomentsev akolomentsev -Brent Baude baude -Carlos Eduardo CarlosEDP -Eric Ren renzhen.rz -Frank Yang frank yang -Georgia Panoutsakopoulou gpanouts -Guangming Wang ethan -Haiyan Meng haiyanmeng -Jian Liao liaojian -Jian Liao liaoj -Ji'an Liu ZeroMagic -Jie Zhang kadisi -John Howard John Howard -John Howard John Howard -Julien Balestra JulienBalestra -Justin Cormack Justin Cormack -Justin Terry Justin -Justin Terry Justin Terry (VM) -Kenfe-Mickaël Laventure Kenfe-Mickael Laventure -Kevin Xu kevin.xu -Lantao Liu Lantao Liu -Lifubang Lifubang -Lu Jingxiao l00397676 -Maksym Pavlenko Maksym Pavlenko -Mark Gordon msg555 -Michael Katsoulis MichaelKatsoulis -Mike Brown Mike Brown -Phil Estes Phil Estes -Rui Cao ruicao -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Stephen J Day Stephen J Day -Sudeesh John sudeesh john -Su Fei fesu -Tõnis Tiigi Tonis Tiigi -Wei Fu Wei Fu -Xiaodong Zhang nashasha1 -Xuean Yan yanxuean -Yuxing Liu Starnop -zhenguang zhu dzzg -zhoulin xie JoeWrightss <42261994+JoeWrightss@users.noreply.github.com> -zhoulin xie JoeWrightss +Abhinandan Prativadi +Abhinandan Prativadi +Akihiro Suda +Akihiro Suda +Andrei Vagin +Andrey Kolomentsev +Brent Baude +Carlos Eduardo +Eric Ren +Frank Yang +Georgia Panoutsakopoulou +Guangming Wang +Haiyan Meng +Jian Liao +Jian Liao +Ji'an Liu +Jie Zhang +John Howard +John Howard +Julien Balestra +Justin Cormack +Justin Terry +Justin Terry +Kenfe-Mickaël Laventure +Kevin Xu +Lantao Liu +Lifubang +Lu Jingxiao +Maksym Pavlenko +Mark Gordon +Michael Katsoulis +Mike Brown +Nishchay Kumar +Phil Estes +Rui Cao +Stephen J Day +Stephen J Day +Stephen J Day +Sudeesh John +Su Fei +Tõnis Tiigi +Wei Fu +Xiaodong Zhang +Xuean Yan +Yuxing Liu +zhenguang zhu +zhongming chang +zhoulin xie +zhoulin xie <42261994+JoeWrightss@users.noreply.github.com> From fa11147e5fcd85c958151b29404ae07787380893 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 6 Sep 2019 16:25:02 -0400 Subject: [PATCH 51/77] Add --env-file to ctr Closes #3517 Signed-off-by: Michael Crosby --- cmd/ctr/commands/commands.go | 4 ++++ cmd/ctr/commands/run/run_unix.go | 3 +++ cmd/ctr/commands/run/run_windows.go | 3 +++ oci/spec_opts.go | 22 ++++++++++++++++++++++ 4 files changed, 32 insertions(+) diff --git a/cmd/ctr/commands/commands.go b/cmd/ctr/commands/commands.go index b1a2b853a145..808ea37d7b52 100644 --- a/cmd/ctr/commands/commands.go +++ b/cmd/ctr/commands/commands.go @@ -77,6 +77,10 @@ var ( Name: "env", Usage: "specify additional container environment variables (i.e. FOO=bar)", }, + cli.StringFlag{ + Name: "env-file", + Usage: "specify additional container environment variables in a file(i.e. FOO=bar, one per line)", + }, cli.StringSliceFlag{ Name: "label", Usage: "specify additional labels (i.e. 
foo=bar)", diff --git a/cmd/ctr/commands/run/run_unix.go b/cmd/ctr/commands/run/run_unix.go index b3eaf926f647..2ba015cd0112 100644 --- a/cmd/ctr/commands/run/run_unix.go +++ b/cmd/ctr/commands/run/run_unix.go @@ -64,6 +64,9 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli args = context.Args()[2:] ) opts = append(opts, oci.WithDefaultSpec(), oci.WithDefaultUnixDevices) + if ef := context.String("env-file"); ef != "" { + opts = append(opts, oci.WithEnvFile(ef)) + } opts = append(opts, oci.WithEnv(context.StringSlice("env"))) opts = append(opts, withMounts(context)) diff --git a/cmd/ctr/commands/run/run_windows.go b/cmd/ctr/commands/run/run_windows.go index a2b860808053..d0e4ad934cca 100644 --- a/cmd/ctr/commands/run/run_windows.go +++ b/cmd/ctr/commands/run/run_windows.go @@ -67,6 +67,9 @@ func NewContainer(ctx gocontext.Context, client *containerd.Client, context *cli opts = append(opts, oci.WithWindowNetworksAllowUnqualifiedDNSQuery()) opts = append(opts, oci.WithWindowsIgnoreFlushesDuringBoot()) } + if ef := context.String("env-file"); ef != "" { + opts = append(opts, oci.WithEnvFile(ef)) + } opts = append(opts, oci.WithEnv(context.StringSlice("env"))) opts = append(opts, withMounts(context)) diff --git a/oci/spec_opts.go b/oci/spec_opts.go index a18c6b214f2a..33bbcf23c021 100644 --- a/oci/spec_opts.go +++ b/oci/spec_opts.go @@ -17,6 +17,7 @@ package oci import ( + "bufio" "context" "encoding/json" "fmt" @@ -1200,3 +1201,24 @@ func WithLinuxDevice(path, permissions string) SpecOpts { return nil } } + +// WithEnvFile adds environment variables from a file to the container's spec +func WithEnvFile(path string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + var vars []string + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + if sc.Err() != nil { + return sc.Err() + } + vars = append(vars, sc.Text()) + } + return WithEnv(vars)(nil, nil, nil, s) + } +} From c1fc21e92e76e6bd501e136f6eaa157d60a10cad Mon Sep 17 00:00:00 2001 From: Akihiro Suda Date: Mon, 9 Sep 2019 02:37:34 +0900 Subject: [PATCH 52/77] ctr: use NewDockerAuthorizer instead of deprecated NewAuthorizer Signed-off-by: Akihiro Suda --- cmd/ctr/commands/resolver.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/ctr/commands/resolver.go b/cmd/ctr/commands/resolver.go index cdb3e94ea955..1b3c30a862dc 100644 --- a/cmd/ctr/commands/resolver.go +++ b/cmd/ctr/commands/resolver.go @@ -103,7 +103,8 @@ func GetResolver(ctx gocontext.Context, clicontext *cli.Context) (remotes.Resolv // Only one host return username, secret, nil } - options.Authorizer = docker.NewAuthorizer(options.Client, credentials) + authOpts := []docker.AuthorizerOpt{docker.WithAuthClient(options.Client), docker.WithAuthCreds(credentials)} + options.Authorizer = docker.NewDockerAuthorizer(authOpts...) return docker.NewResolver(options), nil } From 61f113e6081e5dbaf394216d5b3a2a55f8819661 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Fri, 6 Sep 2019 18:46:46 -0700 Subject: [PATCH 53/77] Automate CRI tarball release. 
Signed-off-by: Lantao Liu --- .travis.yml | 38 ++++++++++++------ Makefile | 1 + script/release/release-cri | 39 +++++++++++++++++++ vendor.conf | 2 +- .../cri/pkg/server/io/container_io.go | 24 ++++++------ .../containerd/cri/pkg/server/io/helpers.go | 20 ++++++---- 6 files changed, 93 insertions(+), 31 deletions(-) create mode 100755 script/release/release-cri diff --git a/.travis.yml b/.travis.yml index 2e0e1452a3f6..80eaa30610ca 100644 --- a/.travis.yml +++ b/.travis.yml @@ -117,14 +117,30 @@ before_deploy: - make release deploy: - provider: releases - api_key: - secure: HO+WSIVVUMMsbU74x+YyFsTP3ahqnR4xjwKAziedJ5lZXKJszQBhiYTFmcTeVBoouNjTISd07GQzpoLChuGC20U3+1NbT+CkK8xWR/x1ao2D3JY3Ds6AD9ubWRNWRLptt/xOn5Vq3F8xZyUYchwvDMl4zKCuTKxQGVdHKsINb2DehKcP5cVL6MMvqzEdfj2g99vqXAqs8uuo6dOmvxmHV43bfzDaAJSabjZZs6TKlWTqCQMet8uxyx2Dmjl2lxLwdqv12oJdrszacasn41NYuEyHI2bXyef1mhWGYN4n9bU/Y5winctZ8DOSOZvYg/2ziAaUN0+CTn1IESwVesrPz23P2Sy7wdLxu8dSIZ2yUHl7OsA5T5a5rDchAGguRVNBWvoGtuepEhdRacxTQUo1cMFZsEXjgRKKjdfc1emYQPVdN8mBv8GJwndty473ZXdvFt5R0kNVFtvWuYCa6UYJD2cKrsPSAfbZCDC/LiR3FOoTaUPMZUVkR2ACEO7Dn4+KlmBajqT40Osk/A7k1XA/TzVhMIpLtE0Vk2DfPmGsjCv8bC+MFd+R2Sc8SFdE92oEWRdoPQY5SxMYQtGxA+cbKVlT1kSw6y80yEbx5JZsBnT6+NTHwmDO3kVU9ztLdawOozTElKNAK8HoAyFmzIZ3wL64oThuDrv/TUuY8Iyn814= - file_glob: true - file: - - releases/*.tar.gz - - releases/*.tar.gz.sha256sum - skip_cleanup: true - on: - repo: containerd/containerd - tags: true + - provider: releases + api_key: + secure: HO+WSIVVUMMsbU74x+YyFsTP3ahqnR4xjwKAziedJ5lZXKJszQBhiYTFmcTeVBoouNjTISd07GQzpoLChuGC20U3+1NbT+CkK8xWR/x1ao2D3JY3Ds6AD9ubWRNWRLptt/xOn5Vq3F8xZyUYchwvDMl4zKCuTKxQGVdHKsINb2DehKcP5cVL6MMvqzEdfj2g99vqXAqs8uuo6dOmvxmHV43bfzDaAJSabjZZs6TKlWTqCQMet8uxyx2Dmjl2lxLwdqv12oJdrszacasn41NYuEyHI2bXyef1mhWGYN4n9bU/Y5winctZ8DOSOZvYg/2ziAaUN0+CTn1IESwVesrPz23P2Sy7wdLxu8dSIZ2yUHl7OsA5T5a5rDchAGguRVNBWvoGtuepEhdRacxTQUo1cMFZsEXjgRKKjdfc1emYQPVdN8mBv8GJwndty473ZXdvFt5R0kNVFtvWuYCa6UYJD2cKrsPSAfbZCDC/LiR3FOoTaUPMZUVkR2ACEO7Dn4+KlmBajqT40Osk/A7k1XA/TzVhMIpLtE0Vk2DfPmGsjCv8bC+MFd+R2Sc8SFdE92oEWRdoPQY5SxMYQtGxA+cbKVlT1kSw6y80yEbx5JZsBnT6+NTHwmDO3kVU9ztLdawOozTElKNAK8HoAyFmzIZ3wL64oThuDrv/TUuY8Iyn814= + file_glob: true + file: + - releases/*.tar.gz + - releases/*.tar.gz.sha256sum + skip_cleanup: true + on: + repo: containerd/containerd + tags: true + - provider: gcs + access_key_id: GOOG1EJPAMPUV4MOGUSPRFM427Q5QOTNODQTMJYPXJFDF46IZLX2NGUQX3T7Q + secret_access_key: + secure: l3ITadMltGpYXShigdyRfpA7VuNcpGNrY9adB/1dQ5UVp0ZyRyimWX5+ea45JArh95iQCp11kY/7gKgL3tKAPsOXa9Lbt59n3XtlrVk5sqmd4S5+ZaI4Za4cRnkhkIAqro/IYsnzdLHqhCFYEmEDyMiI45RBkYYea+fnZFAGaTePmGwnD2LOn7A1z+dDGHt5g1Rpmdj1kB/AsHG6Wr8oGhMg9RlzSkAw2EAc1X3/9ofjOVM0AyB/hAgm/vmgisnqRSKzILqhL04d5b3gavrFn2YjrSEqP102BgYksn7EsJd1NMjA6Hj/qfVNCTn+rL8M85IE6JIAjrBog/HFv8Ez1bl1kSbB9UmAYZizEi7VD/fcxukYOPgqjDUoLrNaS3q+K0DkE1jzzcr72iMM+I8WJga7Vh4+MYjXadD5V96i2QDpthkEMvy1EpWvwQSl/fexaz2nJA5/CiX/V9GnWVsZiWlq/qMxji/ZbWsB04zRDfk9JneI7tubTNYj5FHrzhCQ7jrZYnXY/pb0sQkF1qczpH4PaXXgLnN00xffNudhsA6xZe/d22Yq+LELXeEmfOKD5j/DGdJGINgMj8RcngyKK6znBlBZ7nF3yqhLg4fHrCk9iOivGUXvKqdruqH+Yl7DUAp1Y0sySFlPF4I8RzMPHGPFqAJ9Q+rN2BNslClHAuA= + # TODO: use cri-containerd-release after testing. + #bucket: cri-containerd-release + bucket: cri-containerd-staging + skip_cleanup: true + acl: public-read + file: + - releases/cri/*.tar.gz + - releases/cri/*.tar.gz.sha256 + # TODO: only deploy on tag after testing. 
+ #on: + # repo: containerd/containerd + # tags: true diff --git a/Makefile b/Makefile index f6effe3287b7..671c0c31c5e6 100644 --- a/Makefile +++ b/Makefile @@ -223,6 +223,7 @@ release: $(BINARIES) @install $(BINARIES) releases/$(RELEASE)/bin @cd releases/$(RELEASE) && tar -czf ../$(RELEASE).tar.gz * @cd releases && sha256sum $(RELEASE).tar.gz >$(RELEASE).tar.gz.sha256sum + @VERSION=$(VERSION) script/release/release-cri clean: ## clean up binaries @echo "$(WHALE) $@" diff --git a/script/release/release-cri b/script/release/release-cri new file mode 100755 index 000000000000..5092d75935fc --- /dev/null +++ b/script/release/release-cri @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Releases and publishes cri-containerd release tarball. +# +set -eu -o pipefail + +if [[ -z "${VERSION:-}" ]]; then + echo "VERSION is not set" + exit 1 +fi + +ROOT=${GOPATH}/src/github.com/containerd/containerd +CRI_COMMIT=$(grep github.com/containerd/cri ${ROOT}/vendor.conf | cut -d " " -f 2) + +go get -d github.com/containerd/cri/... +cd $GOPATH/src/github.com/containerd/cri +git checkout $CRI_COMMIT +make clean +make release TARBALL_PREFIX=cri-containerd LOCAL_RELEASE=true VERSION=${VERSION} +make release TARBALL_PREFIX=cri-containerd-cni LOCAL_RELEASE=true INCLUDE_CNI=true VERSION=${VERSION} + +mkdir -p ${ROOT}/releases/cri +cp _output/*.tar.gz ${ROOT}/releases/cri +cp _output/*.tar.gz.sha256 ${ROOT}/releases/cri diff --git a/vendor.conf b/vendor.conf index 0ebeca5710db..04e6449ad3be 100644 --- a/vendor.conf +++ b/vendor.conf @@ -49,7 +49,7 @@ go.opencensus.io v0.22.0 github.com/imdario/mergo v0.3.7 # cri dependencies -github.com/containerd/cri 0165d516161e25e52b4ab52a404a00823f8f0ef6 # master +github.com/containerd/cri f4d75d321c89b8d89bae570a7d2da1b3846c096f # release/1.3 github.com/containerd/go-cni 49fbd9b210f3c8ee3b7fd3cd797aabaf364627c1 github.com/containernetworking/cni v0.7.1 github.com/containernetworking/plugins v0.7.6 diff --git a/vendor/github.com/containerd/cri/pkg/server/io/container_io.go b/vendor/github.com/containerd/cri/pkg/server/io/container_io.go index 7edf627c63b5..509e4993e1b4 100644 --- a/vendor/github.com/containerd/cri/pkg/server/io/container_io.go +++ b/vendor/github.com/containerd/cri/pkg/server/io/container_io.go @@ -105,18 +105,20 @@ func (c *ContainerIO) Config() cio.Config { // to output stream. 
func (c *ContainerIO) Pipe() { wg := c.closer.wg - wg.Add(1) - go func() { - if _, err := io.Copy(c.stdoutGroup, c.stdout); err != nil { - logrus.WithError(err).Errorf("Failed to pipe stdout of container %q", c.id) - } - c.stdout.Close() - c.stdoutGroup.Close() - wg.Done() - logrus.Infof("Finish piping stdout of container %q", c.id) - }() + if c.stdout != nil { + wg.Add(1) + go func() { + if _, err := io.Copy(c.stdoutGroup, c.stdout); err != nil { + logrus.WithError(err).Errorf("Failed to pipe stdout of container %q", c.id) + } + c.stdout.Close() + c.stdoutGroup.Close() + wg.Done() + logrus.Infof("Finish piping stdout of container %q", c.id) + }() + } - if !c.fifos.Terminal { + if !c.fifos.Terminal && c.stderr != nil { wg.Add(1) go func() { if _, err := io.Copy(c.stderrGroup, c.stderr); err != nil { diff --git a/vendor/github.com/containerd/cri/pkg/server/io/helpers.go b/vendor/github.com/containerd/cri/pkg/server/io/helpers.go index e9a0016f64a5..6ca5931f2604 100644 --- a/vendor/github.com/containerd/cri/pkg/server/io/helpers.go +++ b/vendor/github.com/containerd/cri/pkg/server/io/helpers.go @@ -120,17 +120,21 @@ func newStdioPipes(fifos *cio.FIFOSet) (_ *stdioPipes, _ *wgCloser, err error) { set = append(set, f) } - if f, err = fifo.OpenFifo(ctx, fifos.Stdout, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil { - return nil, nil, err + if fifos.Stdout != "" { + if f, err = fifo.OpenFifo(ctx, fifos.Stdout, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil { + return nil, nil, err + } + p.stdout = f + set = append(set, f) } - p.stdout = f - set = append(set, f) - if f, err = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil { - return nil, nil, err + if fifos.Stderr != "" { + if f, err = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil { + return nil, nil, err + } + p.stderr = f + set = append(set, f) } - p.stderr = f - set = append(set, f) return p, &wgCloser{ wg: &sync.WaitGroup{}, From e3abd03ae190bf11c11a035aafb139196c6f7794 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Mon, 9 Sep 2019 16:04:35 -0700 Subject: [PATCH 54/77] Fix CRI release build. Signed-off-by: Lantao Liu --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 671c0c31c5e6..0bcdaf681005 100644 --- a/Makefile +++ b/Makefile @@ -223,7 +223,7 @@ release: $(BINARIES) @install $(BINARIES) releases/$(RELEASE)/bin @cd releases/$(RELEASE) && tar -czf ../$(RELEASE).tar.gz * @cd releases && sha256sum $(RELEASE).tar.gz >$(RELEASE).tar.gz.sha256sum - @VERSION=$(VERSION) script/release/release-cri + @VERSION=$(VERSION:v%=%) script/release/release-cri clean: ## clean up binaries @echo "$(WHALE) $@" From 1b4cec9796e669e46d6e633e3e361fd00d448a74 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Mon, 9 Sep 2019 23:08:02 -0700 Subject: [PATCH 55/77] Update cri test to fix image reference test and fix gcs deploy. Signed-off-by: Lantao Liu --- .travis.yml | 9 +++++++-- script/setup/install-critools | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 80eaa30610ca..8ae31a62542d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -129,6 +129,11 @@ deploy: repo: containerd/containerd tags: true - provider: gcs + # Use master branch to work around https://github.com/travis-ci/dpl/issues/792. + # TODO: Remove this when the fix for https://github.com/travis-ci/dpl/issues/792 + # is rolled out. 
+ edge: + branch: master access_key_id: GOOG1EJPAMPUV4MOGUSPRFM427Q5QOTNODQTMJYPXJFDF46IZLX2NGUQX3T7Q secret_access_key: secure: l3ITadMltGpYXShigdyRfpA7VuNcpGNrY9adB/1dQ5UVp0ZyRyimWX5+ea45JArh95iQCp11kY/7gKgL3tKAPsOXa9Lbt59n3XtlrVk5sqmd4S5+ZaI4Za4cRnkhkIAqro/IYsnzdLHqhCFYEmEDyMiI45RBkYYea+fnZFAGaTePmGwnD2LOn7A1z+dDGHt5g1Rpmdj1kB/AsHG6Wr8oGhMg9RlzSkAw2EAc1X3/9ofjOVM0AyB/hAgm/vmgisnqRSKzILqhL04d5b3gavrFn2YjrSEqP102BgYksn7EsJd1NMjA6Hj/qfVNCTn+rL8M85IE6JIAjrBog/HFv8Ez1bl1kSbB9UmAYZizEi7VD/fcxukYOPgqjDUoLrNaS3q+K0DkE1jzzcr72iMM+I8WJga7Vh4+MYjXadD5V96i2QDpthkEMvy1EpWvwQSl/fexaz2nJA5/CiX/V9GnWVsZiWlq/qMxji/ZbWsB04zRDfk9JneI7tubTNYj5FHrzhCQ7jrZYnXY/pb0sQkF1qczpH4PaXXgLnN00xffNudhsA6xZe/d22Yq+LELXeEmfOKD5j/DGdJGINgMj8RcngyKK6znBlBZ7nF3yqhLg4fHrCk9iOivGUXvKqdruqH+Yl7DUAp1Y0sySFlPF4I8RzMPHGPFqAJ9Q+rN2BNslClHAuA= @@ -138,8 +143,8 @@ deploy: skip_cleanup: true acl: public-read file: - - releases/cri/*.tar.gz - - releases/cri/*.tar.gz.sha256 + - releases/cri/*.tar.gz + - releases/cri/*.tar.gz.sha256 # TODO: only deploy on tag after testing. #on: # repo: containerd/containerd diff --git a/script/setup/install-critools b/script/setup/install-critools index 08fa0e952b21..6ae9bdd984f7 100755 --- a/script/setup/install-critools +++ b/script/setup/install-critools @@ -21,7 +21,7 @@ set -eu -o pipefail go get -u github.com/onsi/ginkgo/ginkgo -CRITEST_COMMIT=v1.15.0 +CRITEST_COMMIT=427262054f59f3b849391310856a19474acb7e83 go get -d github.com/kubernetes-incubator/cri-tools/... cd $GOPATH/src/github.com/kubernetes-incubator/cri-tools git checkout $CRITEST_COMMIT From 3db5a5ad2ed9a91c1d3ed615bc81cb513523b6d2 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 9 Sep 2019 15:27:40 -0700 Subject: [PATCH 56/77] Fix darwin build for cri release Split release target for cri Signed-off-by: Derek McGowan --- .travis.yml | 10 ++++++---- Makefile | 11 +++++++++-- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index 8ae31a62542d..66c1dc889918 100644 --- a/.travis.yml +++ b/.travis.yml @@ -115,6 +115,7 @@ after_success: before_deploy: - make release + - if [ "$TRAVIS_GOOS" = "linux" ]; then make cri-release; fi deploy: - provider: releases @@ -145,7 +146,8 @@ deploy: file: - releases/cri/*.tar.gz - releases/cri/*.tar.gz.sha256 - # TODO: only deploy on tag after testing. 
- #on: - # repo: containerd/containerd - # tags: true + on: + repo: containerd/containerd + # TODO: switch `tags: true` after validating on master + branch: master + condition: $TRAVIS_GOOS = linux diff --git a/Makefile b/Makefile index 0bcdaf681005..84ea389388f4 100644 --- a/Makefile +++ b/Makefile @@ -216,13 +216,20 @@ install-man: @echo "$(WHALE) $@" $(foreach manpage,$(addprefix man/,$(MANPAGES)), $(call installmanpage,$(manpage),$(subst .,,$(suffix $(manpage))),$(notdir $(manpage)))) -release: $(BINARIES) +releases/$(RELEASE).tar.gz: $(BINARIES) @echo "$(WHALE) $@" @rm -rf releases/$(RELEASE) releases/$(RELEASE).tar.gz @install -d releases/$(RELEASE)/bin @install $(BINARIES) releases/$(RELEASE)/bin - @cd releases/$(RELEASE) && tar -czf ../$(RELEASE).tar.gz * + @tar -czf releases/$(RELEASE).tar.gz -C releases/$(RELEASE) bin + @rm -rf releases/$(RELEASE) + +release: $(BINARIES) releases/$(RELEASE).tar.gz + @echo "$(WHALE) $@" @cd releases && sha256sum $(RELEASE).tar.gz >$(RELEASE).tar.gz.sha256sum + +cri-release: $(BINARIES) releases/$(RELEASE).tar.gz + @echo "$(WHALE) $@" @VERSION=$(VERSION:v%=%) script/release/release-cri clean: ## clean up binaries From 424d33c5b70d4f8ac430a23dc6f329688ef23521 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 11 Sep 2019 11:20:34 -0400 Subject: [PATCH 57/77] Update go-runc to e029b79d8cda8374981c64eba71f28e Includes fix for user namespaces and NOTIFY_SOCKET. Signed-off-by: Michael Crosby --- vendor.conf | 2 +- .../containerd/go-runc/command_linux.go | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/vendor.conf b/vendor.conf index 04e6449ad3be..c6542d09293d 100644 --- a/vendor.conf +++ b/vendor.conf @@ -1,4 +1,4 @@ -github.com/containerd/go-runc 9007c2405372fe28918845901a3276c0915689a1 +github.com/containerd/go-runc e029b79d8cda8374981c64eba71f28ec38e5526f github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f github.com/containerd/cgroups c4b9ac5c7601384c965b9646fc515884e091ebb9 github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40 diff --git a/vendor/github.com/containerd/go-runc/command_linux.go b/vendor/github.com/containerd/go-runc/command_linux.go index 71b52f9de4e8..8a30f679d08f 100644 --- a/vendor/github.com/containerd/go-runc/command_linux.go +++ b/vendor/github.com/containerd/go-runc/command_linux.go @@ -20,6 +20,7 @@ import ( "context" "os" "os/exec" + "strings" "syscall" ) @@ -32,10 +33,24 @@ func (r *Runc) command(context context.Context, args ...string) *exec.Cmd { cmd.SysProcAttr = &syscall.SysProcAttr{ Setpgid: r.Setpgid, } - cmd.Env = os.Environ() + cmd.Env = filterEnv(os.Environ(), "NOTIFY_SOCKET") // NOTIFY_SOCKET introduces a special behavior in runc but should only be set if invoked from systemd if r.PdeathSignal != 0 { cmd.SysProcAttr.Pdeathsig = r.PdeathSignal } return cmd } + +func filterEnv(in []string, names ...string) []string { + out := make([]string, 0, len(in)) +loop0: + for _, v := range in { + for _, k := range names { + if strings.HasPrefix(v, k+"=") { + continue loop0 + } + } + out = append(out, v) + } + return out +} From d1a12823b73c5a1eb7fc0ed697806e28388648bd Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Tue, 10 Sep 2019 11:26:17 -0700 Subject: [PATCH 58/77] Update gcs cri release. 
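The go-runc update above stops passing `NOTIFY_SOCKET` through to runc, since that variable triggers runc's systemd-notify behavior and should only be set when runc is launched by systemd itself. A small stand-alone sketch of the same filter-by-prefix idea (the environment values here are invented, and this is not go-runc's internal helper):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Environment as inherited when the daemon happens to run under systemd.
	env := []string{"PATH=/usr/bin", "NOTIFY_SOCKET=/run/systemd/notify", "HOME=/root"}

	// Copy everything except NOTIFY_SOCKET before handing the environment
	// to a child process, mirroring the go-runc change above.
	var filtered []string
	for _, kv := range env {
		if strings.HasPrefix(kv, "NOTIFY_SOCKET=") {
			continue
		}
		filtered = append(filtered, kv)
	}
	fmt.Println(filtered) // [PATH=/usr/bin HOME=/root]
}
```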
Signed-off-by: Lantao Liu --- .travis.yml | 19 ++-------------- script/release/deploy-cri | 35 +++++++++++++++++++++++++++++ script/release/gcp-secret.json.enc | Bin 0 -> 2352 bytes 3 files changed, 37 insertions(+), 17 deletions(-) create mode 100755 script/release/deploy-cri create mode 100644 script/release/gcp-secret.json.enc diff --git a/.travis.yml b/.travis.yml index 66c1dc889918..b62d6bf3d56b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -129,23 +129,8 @@ deploy: on: repo: containerd/containerd tags: true - - provider: gcs - # Use master branch to work around https://github.com/travis-ci/dpl/issues/792. - # TODO: Remove this when the fix for https://github.com/travis-ci/dpl/issues/792 - # is rolled out. - edge: - branch: master - access_key_id: GOOG1EJPAMPUV4MOGUSPRFM427Q5QOTNODQTMJYPXJFDF46IZLX2NGUQX3T7Q - secret_access_key: - secure: l3ITadMltGpYXShigdyRfpA7VuNcpGNrY9adB/1dQ5UVp0ZyRyimWX5+ea45JArh95iQCp11kY/7gKgL3tKAPsOXa9Lbt59n3XtlrVk5sqmd4S5+ZaI4Za4cRnkhkIAqro/IYsnzdLHqhCFYEmEDyMiI45RBkYYea+fnZFAGaTePmGwnD2LOn7A1z+dDGHt5g1Rpmdj1kB/AsHG6Wr8oGhMg9RlzSkAw2EAc1X3/9ofjOVM0AyB/hAgm/vmgisnqRSKzILqhL04d5b3gavrFn2YjrSEqP102BgYksn7EsJd1NMjA6Hj/qfVNCTn+rL8M85IE6JIAjrBog/HFv8Ez1bl1kSbB9UmAYZizEi7VD/fcxukYOPgqjDUoLrNaS3q+K0DkE1jzzcr72iMM+I8WJga7Vh4+MYjXadD5V96i2QDpthkEMvy1EpWvwQSl/fexaz2nJA5/CiX/V9GnWVsZiWlq/qMxji/ZbWsB04zRDfk9JneI7tubTNYj5FHrzhCQ7jrZYnXY/pb0sQkF1qczpH4PaXXgLnN00xffNudhsA6xZe/d22Yq+LELXeEmfOKD5j/DGdJGINgMj8RcngyKK6znBlBZ7nF3yqhLg4fHrCk9iOivGUXvKqdruqH+Yl7DUAp1Y0sySFlPF4I8RzMPHGPFqAJ9Q+rN2BNslClHAuA= - # TODO: use cri-containerd-release after testing. - #bucket: cri-containerd-release - bucket: cri-containerd-staging - skip_cleanup: true - acl: public-read - file: - - releases/cri/*.tar.gz - - releases/cri/*.tar.gz.sha256 + - provider: script + script: bash script/release/deploy-cri on: repo: containerd/containerd # TODO: switch `tags: true` after validating on master diff --git a/script/release/deploy-cri b/script/release/deploy-cri new file mode 100755 index 000000000000..fdfb23bf5de8 --- /dev/null +++ b/script/release/deploy-cri @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Deploy the cri-containerd release tarball to gcs. +# +set -eu -o pipefail + +ROOT=${GOPATH}/src/github.com/containerd/containerd +# TODO: Change cri-containerd-release after tested. 
+BUCKET="gs://cri-containerd-staging" + +rm -rf "${HOME}/google-cloud-sdk" +export CLOUDSDK_CORE_DISABLE_PROMPTS=1 +curl https://sdk.cloud.google.com | bash > /dev/null +gcloud version + +openssl aes-256-cbc -K $encrypted_0a6446eb3ae3_key -iv $encrypted_0a6446eb3ae3_iv -in "${ROOT}/script/release/gcp-secret.json.enc" -out gcp-secret.json -d +gcloud auth activate-service-account --key-file gcp-secret.json --project=k8s-cri-containerd + +gsutil cp "${ROOT}/releases/cri/*.tar.gz" "${BUCKET}" +gsutil cp "${ROOT}/releases/cri/*.tar.gz.sha256" "${BUCKET}" diff --git a/script/release/gcp-secret.json.enc b/script/release/gcp-secret.json.enc new file mode 100644 index 0000000000000000000000000000000000000000..53e623a5597fe513d4524107dd538da6d3e2d708 GIT binary patch literal 2352 zcmV-03D5R)lfxhfmvJR9g{qwzu7iTKPO=mT8-DI#Hf19LrYLfpq8c15;s>-~(|L&3 z^;->H-&CLp(58)XLwLfz6Y@DE`y8n^qq*^C)&{OeE{%d?y|OSn^fv-oVM2y8f(mIS zc^%u`r0&~~W%Nfh1}Gubd75?XcImRul;@N}TEh}Iblp2V9#&6IeWO)Xc|aTN2BHPe zDK`%wfZRM}m$xHO55kZ!d7CoCh*iac{_>LR?w% ze%K8@g0$+bac5 z1Jx$wQjZ8Vdn!fNon5SXZUy#;IuTKUy_DJWsL%~Gpv^RGGi7I?my=jzZzEe_X7gcu z^`CEA9Q#zdrAJihTdjXn?u)>^Ait_d*~0nmOV1L^lO z;1>4^0Yh(d+Cpm>G{zS6)AWt8uz8SjbX7_I{j+N;NTjq$o>{XeE><3+_b753b7f74 zCdn=9-G{xu%nZ7NSsGSFi6>rVQXq21IOx{fER%c`UMID#JXw zWi6YAxP*q8RZAJ``4@>AUwi>XX5HEV!kf`LvmoB374c3o8P z)<>dB&kl7%f(V5J%^}k9@3;u+o0Sf|KMN~9;~nwhvs%miu;*ec@iMPTnbX({Sv=XP zsS=P5^c8}N6gEcMMN67&KQc(n7)#&dUEfyOd3-g(4Pu_P`5HCL!Ejk9xsBgXJ9(Ec zqW2GBAEuRS7CJIiTb&veS!y}Ar^TdM>XpY?sHXYsSF{Ucw(fKZeZKicsHRrNs+uvY zH1r>(F&g>yE!-BfqP%BQ52>&;#6|ao$M26oJddNB4B?htj%IB5*_MA--Wgmz5SPvV zM=-Y4vDza~Yu&|s!Qmu7f9zFf(mUD{JFd+;t?e@WQp%~5TxH|&Se9|+G?v?_L(iBr z1k$3U(qSgM`^XJrj^WK#spH69-X-PUe+S@s&Y3k0e`nSXUuMtjtzLj3-A;NvyWSoZ zAP8D42_Zfwoeiqb9d40Kd@e_TP;cb)dj}Vr=+o!gfRPh@_6yvk?d1%crMSyAv>FgA zkMFvdjWCkf=DcReVhJ|LnC?FQq&O?KqgO~~n1n$_M8C1c$uvBK3PRuJk>hzOg@?-R zl90=pYu^@&TG`UAYB0bN!K9m0Ll+68|IkqU;Yrv?!DW7v{vCq{)T_uy%e3`aP>)PC zBi&rX)^H)C;MP!26gaqQ^d;qWkm?RqZq)X=O6ysLKVKA z57#z<{E!sF@7cw2&G zFR$#N8T@gQ6j8U^ovD0-6cEMEwmM|CH(e=D$6 z2)o2uu)SyI@HCFscSA}2N~~IxyG%o$f4+W)-dU=bjXqIHaIEX8EDzBp$vRI+&_7^Q zBP6TGd}Yp!HKZst?(h967!Sl>co?l~R?gC(!nLIC&vfhHJwIZ&N^08~q16s@RT8Ob z!7UJC2Zwm+NPggamF14B~75M9a>S}l$4-Fgx?cSS7{6 zS&(-efq-a#f@>`s59Sk|T5wghEl|!m`y{@Gfc4tx^K)D&^P3b@s#DT9$C!*TbIcAm zQbURXxdlwOhH6Qyu=Tx&q9II4>N|FF$87_=OqHY_K@r`!UJ0v@#14_8xIkjwyI`OO zM@@wQza_%xlQ!HHX_N5u)l^nGuD~_`%7-HmnnRGLU3)WiLsJ}D933iXa-5tU zD1}(#e7SXPrL#C)o7AV2`adk;Dl7%#A>Aui;&x{=NODw6g0TbK8d0db zEZ<7_S~<9q{RW@|gxOI)HfyHf3dz<&o-1R5TsTW%h@8mpani7VYB5PSx~kgA zYb&jnL53EL01O#Fpyq@+{PFG03JxQ7z!o z)3eA<7Xj6PaykTpSViMb+Gdc3|HXg~#q6+gzk}Tr7yo+psqngMSF3-VKmf`#b%|@p zJW-)L_rZ!v(^g({HOU0H7T?B%;~(_g@dEWbb8$8JKS&hZt7obpwcJCzATicUMsX2LJej*zXBYp*kikd$j#x zkS=#}*g@j77Yj&JBrBFg`fP8#?LoHE*&Te?wNaR=M!TOF^5Ue4LHent?WW)F zuLZ!3u5Z0wubU_1X|-TqMFWyZQ2~p=SDFim8-A81`k`6-lUUC<<_@>;sC#$EUD4M|K}qw z5AL!&YdgEJ#0o@Kw`}+H4|LsD@NE%6POP`PmF=BJj4ZT5SQVk6UjWS3b>AA(^(GZ^ WKso8)VH##R9451@a;n@$^QRx!B#0IO literal 0 HcmV?d00001 From f3a5b8c0a92564310b7ff27273a6d72844e42598 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 10 Sep 2019 12:12:25 -0400 Subject: [PATCH 59/77] Add command to generate man pages The climan package has a command that can be registered with any urfav cli app to generate man pages. 
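The registration pattern is the same one this patch applies to `cmd/containerd`, `cmd/ctr`, and `cmd/containerd-stress` below; a minimal sketch of wiring the command into any urfave/cli v1 application, where the app name and usage string are placeholders:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containerd/containerd/pkg/climan"
	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "example" // placeholder name
	app.Usage = "demo application that can emit its own man page"

	// Register the hidden gen-man command; `example gen-man --format man man/`
	// then writes example.1 into the man/ directory.
	app.Commands = append(app.Commands, climan.Command)

	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```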
Signed-off-by: Michael Crosby --- .golangci.yml | 1 + Makefile | 8 +- cmd/containerd-stress/main.go | 2 + cmd/containerd/command/main.go | 11 + cmd/containerd/main.go | 2 + cmd/ctr/app/main.go | 5 + cmd/ctr/main.go | 2 + docs/man/containerd.1.md | 61 -- docs/man/ctr.1.md | 98 --- pkg/climan/cli.go | 75 ++ vendor.conf | 2 +- vendor/github.com/urfave/cli/README.md | 302 ++++++-- vendor/github.com/urfave/cli/app.go | 85 ++- vendor/github.com/urfave/cli/category.go | 2 +- vendor/github.com/urfave/cli/cli.go | 2 +- vendor/github.com/urfave/cli/command.go | 141 ++-- vendor/github.com/urfave/cli/context.go | 80 ++- vendor/github.com/urfave/cli/docs.go | 148 ++++ vendor/github.com/urfave/cli/fish.go | 194 ++++++ vendor/github.com/urfave/cli/flag.go | 651 +++--------------- vendor/github.com/urfave/cli/flag_bool.go | 109 +++ vendor/github.com/urfave/cli/flag_bool_t.go | 110 +++ vendor/github.com/urfave/cli/flag_duration.go | 106 +++ vendor/github.com/urfave/cli/flag_float64.go | 106 +++ .../github.com/urfave/cli/flag_generated.go | 627 ----------------- vendor/github.com/urfave/cli/flag_generic.go | 110 +++ vendor/github.com/urfave/cli/flag_int.go | 105 +++ vendor/github.com/urfave/cli/flag_int64.go | 106 +++ .../github.com/urfave/cli/flag_int64_slice.go | 141 ++++ .../github.com/urfave/cli/flag_int_slice.go | 142 ++++ vendor/github.com/urfave/cli/flag_string.go | 98 +++ .../urfave/cli/flag_string_slice.go | 138 ++++ vendor/github.com/urfave/cli/flag_uint.go | 106 +++ vendor/github.com/urfave/cli/flag_uint64.go | 106 +++ vendor/github.com/urfave/cli/funcs.go | 3 + vendor/github.com/urfave/cli/go.mod | 9 + vendor/github.com/urfave/cli/help.go | 183 ++--- vendor/github.com/urfave/cli/parse.go | 80 +++ vendor/github.com/urfave/cli/sort.go | 29 + vendor/github.com/urfave/cli/template.go | 121 ++++ 40 files changed, 2811 insertions(+), 1596 deletions(-) delete mode 100644 docs/man/containerd.1.md delete mode 100644 docs/man/ctr.1.md create mode 100644 pkg/climan/cli.go create mode 100644 vendor/github.com/urfave/cli/docs.go create mode 100644 vendor/github.com/urfave/cli/fish.go create mode 100644 vendor/github.com/urfave/cli/flag_bool.go create mode 100644 vendor/github.com/urfave/cli/flag_bool_t.go create mode 100644 vendor/github.com/urfave/cli/flag_duration.go create mode 100644 vendor/github.com/urfave/cli/flag_float64.go delete mode 100644 vendor/github.com/urfave/cli/flag_generated.go create mode 100644 vendor/github.com/urfave/cli/flag_generic.go create mode 100644 vendor/github.com/urfave/cli/flag_int.go create mode 100644 vendor/github.com/urfave/cli/flag_int64.go create mode 100644 vendor/github.com/urfave/cli/flag_int64_slice.go create mode 100644 vendor/github.com/urfave/cli/flag_int_slice.go create mode 100644 vendor/github.com/urfave/cli/flag_string.go create mode 100644 vendor/github.com/urfave/cli/flag_string_slice.go create mode 100644 vendor/github.com/urfave/cli/flag_uint.go create mode 100644 vendor/github.com/urfave/cli/flag_uint64.go create mode 100644 vendor/github.com/urfave/cli/go.mod create mode 100644 vendor/github.com/urfave/cli/parse.go create mode 100644 vendor/github.com/urfave/cli/sort.go create mode 100644 vendor/github.com/urfave/cli/template.go diff --git a/.golangci.yml b/.golangci.yml index 9257631131c2..d9ea43977cf9 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -19,3 +19,4 @@ run: - api - design - docs + - docs/man diff --git a/Makefile b/Makefile index 84ea389388f4..487f6954ebb0 100644 --- a/Makefile +++ b/Makefile @@ -82,6 +82,7 @@ 
TEST_REQUIRES_ROOT_PACKAGES=$(filter \ # Project binaries. COMMANDS=ctr containerd containerd-stress +MANBINARIES=ctr containerd containerd-stress MANPAGES=ctr.1 containerd.1 containerd-config.1 containerd-config.toml.5 ifdef BUILDTAGS @@ -114,7 +115,7 @@ BINARIES=$(addprefix bin/,$(COMMANDS)) TESTFLAGS ?= $(TESTFLAGS_RACE) TESTFLAGS_PARALLEL ?= 8 -.PHONY: clean all AUTHORS build binaries test integration generate protos checkprotos coverage ci check help install uninstall vendor release mandir install-man +.PHONY: clean all AUTHORS build binaries test integration generate protos checkprotos coverage ci check help install uninstall vendor release mandir install-man genman .DEFAULT: default all: binaries @@ -203,6 +204,11 @@ man: mandir $(addprefix man/,$(MANPAGES)) mandir: @mkdir -p man +genman: $(addprefix genman/,$(MANBINARIES)) + +genman/%: bin/% FORCE + "$<" gen-man --format man man/ + man/%: docs/man/%.md FORCE @echo "$(WHALE) $<" go-md2man -in "$<" -out "$@" diff --git a/cmd/containerd-stress/main.go b/cmd/containerd-stress/main.go index 9ef058b907f5..b842df333a95 100644 --- a/cmd/containerd-stress/main.go +++ b/cmd/containerd-stress/main.go @@ -30,6 +30,7 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/climan" "github.com/containerd/containerd/plugin" metrics "github.com/docker/go-metrics" "github.com/sirupsen/logrus" @@ -161,6 +162,7 @@ func main() { } app.Commands = []cli.Command{ densityCommand, + climan.Command, } app.Action = func(context *cli.Context) error { config := config{ diff --git a/cmd/containerd/command/main.go b/cmd/containerd/command/main.go index 727b29f4d409..9bde46b47644 100644 --- a/cmd/containerd/command/main.go +++ b/cmd/containerd/command/main.go @@ -70,6 +70,17 @@ func App() *cli.App { app.Name = "containerd" app.Version = version.Version app.Usage = usage + app.Description = ` +containerd is a high performance container runtime whose daemon can be started +by using this command. If none of the *config*, *publish*, or *help* commands +are specified, the default action of the **containerd** command is to start the +containerd daemon in the foreground. + + +A default configuration is used if no TOML configuration is specified or located +at the default file location. The *containerd config* command can be used to +generate the default configuration for containerd. The output of that command +can be used and modified as necessary as a custom configuration.` app.Flags = []cli.Flag{ cli.StringFlag{ Name: "config,c", diff --git a/cmd/containerd/main.go b/cmd/containerd/main.go index 10bde45bd444..d258c9500f59 100644 --- a/cmd/containerd/main.go +++ b/cmd/containerd/main.go @@ -21,6 +21,7 @@ import ( "os" "github.com/containerd/containerd/cmd/containerd/command" + "github.com/containerd/containerd/pkg/climan" "github.com/containerd/containerd/pkg/seed" ) @@ -30,6 +31,7 @@ func init() { func main() { app := command.App() + app.Commands = append(app.Commands, climan.Command) if err := app.Run(os.Args); err != nil { fmt.Fprintf(os.Stderr, "containerd: %s\n", err) os.Exit(1) diff --git a/cmd/ctr/app/main.go b/cmd/ctr/app/main.go index 9c33216b3353..d0b277cf28c0 100644 --- a/cmd/ctr/app/main.go +++ b/cmd/ctr/app/main.go @@ -57,6 +57,11 @@ func New() *cli.App { app := cli.NewApp() app.Name = "ctr" app.Version = version.Version + app.Description = ` +ctr is an unsupported debug and administrative client for interacting +with the containerd daemon. 
Because it is unsupported, the commands, +options, and operations are not guaranteed to be backward compatible or +stable from release to release of the containerd project.` app.Usage = ` __ _____/ /______ diff --git a/cmd/ctr/main.go b/cmd/ctr/main.go index cf72de28eb2f..9db84d22de9c 100644 --- a/cmd/ctr/main.go +++ b/cmd/ctr/main.go @@ -21,6 +21,7 @@ import ( "os" "github.com/containerd/containerd/cmd/ctr/app" + "github.com/containerd/containerd/pkg/climan" "github.com/containerd/containerd/pkg/seed" "github.com/urfave/cli" ) @@ -34,6 +35,7 @@ func init() { func main() { app := app.New() app.Commands = append(app.Commands, pluginCmds...) + app.Commands = append(app.Commands, climan.Command) if err := app.Run(os.Args); err != nil { fmt.Fprintf(os.Stderr, "ctr: %s\n", err) os.Exit(1) diff --git a/docs/man/containerd.1.md b/docs/man/containerd.1.md deleted file mode 100644 index 94b85ea35519..000000000000 --- a/docs/man/containerd.1.md +++ /dev/null @@ -1,61 +0,0 @@ -# containerd 1 01/29/2018 - -## NAME - -containerd - an industry-standard container runtime with an emphasis on simplicity, -robustness and portability - -## SYNOPSIS - -containerd [global options] command [command options] [arguments...] - -## DESCRIPTION - -**containerd** is a high performance container runtime whose daemon can be started -by using this command. If none of the *config*, *publish*, or *help* commands -are specified, the default action of the **containerd** command is to start the -containerd daemon in the foreground. - -A default configuration is used if no TOML configuration is specified or located -at the default file location. The *containerd config* command can be used to -generate the default configuration for containerd. The output of that command -can be used and modified as necessary as a custom configuration. - -The *publish* command is used internally by parts of the containerd runtime -to publish events. It is not meant to be used as a standalone utility. - -## OPTIONS - -**--config value, -c value** -: Specify the default path to the configuration file (default: "/etc/containerd/config.toml") - -**--log-level value, -l value** -: Set the logging level. Available levels are: [debug, info, warn, error, fatal, panic] - -**--address value, -a value** -: UNIX socket address for containerd's GRPC server to listen on (default: "/run/containerd/containerd.sock") - -**--root value** -: The containerd root directory (default: "/var/lib/containerd"). A persistent directory location where metadata and image content are stored - -**--state value** -: The containerd state directory (default: "/run/containerd"). A transient state directory used during containerd operation - -**--help, -h** -: Show containerd command help text - -**--version, -v** -: Print the containerd server version - -## BUGS - -Please file any specific issues that you encounter at -https://github.com/containerd/containerd. - -## AUTHOR - -Phil Estes - -## SEE ALSO - -ctr(1), containerd-config(1), containerd-config.toml(5) diff --git a/docs/man/ctr.1.md b/docs/man/ctr.1.md deleted file mode 100644 index f400883b8e5b..000000000000 --- a/docs/man/ctr.1.md +++ /dev/null @@ -1,98 +0,0 @@ -# ctr 1 01/30/2018 - -## NAME - -ctr - command line for containerd - -## SYNOPSIS - -**ctr [global options] command [command options] [arguments...]** - -## DESCRIPTION - -**ctr** is an unsupported debug and administrative client for interacting -with the containerd daemon. 
Because it is unsupported, the commands, -options, and operations are not guaranteed to be backward compatible or -stable from release to release of the containerd project. - -## OPTIONS - -The following commands are available in the **ctr** utility: - -**plugins,plugin** -: Provides information about containerd plugins - -**version** -: Prints the client and server versions - -**containers,c,container** -: Manages and interacts with containers - -**content** -: Manages and interacts with content - -**events,event** -: Displays containerd events - -**images,image** -: Manages and interacts with images - -**namespaces,namespace** -: Manages and interacts with containerd namespaces - -**pprof** -: Provides golang pprof outputs for containerd - -**run** -: Runs a container - -**snapshots,snapshot** -: Manages and interacts with snapshots - -**tasks,t,task** -: Manages and interacts with tasks - -**shim** -: Interacts with a containerd shim directly - -**help,h** -: Displays a list of commands or help for one specific command - -The following global options apply to all **ctr** commands: - -**--debug** -: Enable debug output in logs - -**--address value, -a value** -: Address for containerd's GRPC server (default: */run/containerd/containerd.sock*) - -**--timeout value** -: Total timeout for ctr commands (default: *0s*) - -**--connect-timeout value** -: Timeout for connecting to containerd (default: *0s*) - -**--namespace value, -n value** -: Namespace to use with commands (default: *default*) [also read from *$CONTAINERD_NAMESPACE*] - -**--help, -h** -: Show help text - -**--version, -v** -: Prints the **ctr** version - -## BUGS - -Note that the **ctr** utility is not an officially supported part of the -containerd project releases. - -However, please feel free to file any specific issues that you encounter at -https://github.com/containerd/containerd. - -## AUTHOR - -Phil Estes - -## SEE ALSO - -containerd(1), containerd-config(1), containerd-config.toml(5) diff --git a/pkg/climan/cli.go b/pkg/climan/cli.go new file mode 100644 index 000000000000..4302b28a3abe --- /dev/null +++ b/pkg/climan/cli.go @@ -0,0 +1,75 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package climan + +import ( + "fmt" + "io/ioutil" + "path/filepath" + + "github.com/pkg/errors" + "github.com/urfave/cli" +) + +var Command = cli.Command{ + Name: "gen-man", + Usage: "generate man pages for the cli application", + Hidden: true, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "format,f", + Usage: "specify the format in (md:man)", + Value: "md", + }, + cli.IntFlag{ + Name: "section,s", + Usage: "section of the man pages", + Value: 1, + }, + }, + Action: func(clix *cli.Context) (err error) { + // clear out the usage as we use banners that do not display in man pages + clix.App.Usage = "" + dir := clix.Args().First() + if dir == "" { + return errors.New("directory argument is required") + } + var ( + data string + ext string + ) + switch clix.String("format") { + case "man": + data, err = clix.App.ToMan() + default: + data, err = clix.App.ToMarkdown() + ext = "md" + } + if err != nil { + return err + } + return ioutil.WriteFile(filepath.Join(dir, formatFilename(clix, clix.Int("section"), ext)), []byte(data), 0644) + }, +} + +func formatFilename(clix *cli.Context, section int, ext string) string { + s := fmt.Sprintf("%s.%d", clix.App.Name, section) + if ext != "" { + s += "." + ext + } + return s +} diff --git a/vendor.conf b/vendor.conf index c6542d09293d..bc08afad3bb7 100644 --- a/vendor.conf +++ b/vendor.conf @@ -23,7 +23,7 @@ github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 github.com/opencontainers/runc f4982d86f7fde0b6f953cc62ccc4022c519a10a9 # v1.0.0-rc8-32-gf4982d86 github.com/konsorten/go-windows-terminal-sequences v1.0.1 github.com/sirupsen/logrus v1.4.1 -github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c +github.com/urfave/cli 388c2dd0f4ffaa8541e371d49c8413870a04d9fe # v1.22.0 + golang.org/x/net f3200d17e092c607f615320ecaad13d87ad9a2b3 google.golang.org/grpc 6eaf6f47437a6b4e2153a190160ef39a92c7eceb # v1.23.0 github.com/pkg/errors v0.8.1 diff --git a/vendor/github.com/urfave/cli/README.md b/vendor/github.com/urfave/cli/README.md index 34055fe74457..96720b621bfa 100644 --- a/vendor/github.com/urfave/cli/README.md +++ b/vendor/github.com/urfave/cli/README.md @@ -3,15 +3,11 @@ cli [![Build Status](https://travis-ci.org/urfave/cli.svg?branch=master)](https://travis-ci.org/urfave/cli) [![Windows Build Status](https://ci.appveyor.com/api/projects/status/rtgk5xufi932pb2v?svg=true)](https://ci.appveyor.com/project/urfave/cli) + [![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli) [![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli) [![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli) -[![top level coverage](https://gocover.io/_badge/github.com/urfave/cli?0 "top level coverage")](http://gocover.io/github.com/urfave/cli) / -[![altsrc coverage](https://gocover.io/_badge/github.com/urfave/cli/altsrc?0 "altsrc coverage")](http://gocover.io/github.com/urfave/cli/altsrc) - -**Notice:** This is the library formerly known as -`github.com/codegangsta/cli` -- Github will automatically redirect requests -to this repository, but we recommend updating your references for clarity. +[![codecov](https://codecov.io/gh/urfave/cli/branch/master/graph/badge.svg)](https://codecov.io/gh/urfave/cli) cli is a simple, fast, and fun package for building command line apps in Go. 
The goal is to enable developers to write fast and distributable command line @@ -23,7 +19,7 @@ applications in an expressive way. - [Installation](#installation) * [Supported platforms](#supported-platforms) * [Using the `v2` branch](#using-the-v2-branch) - * [Pinning to the `v1` releases](#pinning-to-the-v1-releases) + * [Using `v1` releases](#using-v1-releases) - [Getting Started](#getting-started) - [Examples](#examples) * [Arguments](#arguments) @@ -32,11 +28,13 @@ applications in an expressive way. + [Alternate Names](#alternate-names) + [Ordering](#ordering) + [Values from the Environment](#values-from-the-environment) + + [Values from files](#values-from-files) + [Values from alternate input sources (YAML, TOML, and others)](#values-from-alternate-input-sources-yaml-toml-and-others) + [Precedence](#precedence) * [Subcommands](#subcommands) * [Subcommands categories](#subcommands-categories) * [Exit code](#exit-code) + * [Combining short options](#combining-short-options) * [Bash Completion](#bash-completion) + [Enabling](#enabling) + [Distribution](#distribution) @@ -62,7 +60,7 @@ organized, and expressive! ## Installation -Make sure you have a working Go environment. Go version 1.2+ is supported. [See +Make sure you have a working Go environment. Go version 1.10+ is supported. [See the install instructions for Go](http://golang.org/doc/install.html). To install cli, simply run: @@ -106,25 +104,20 @@ import ( ... ``` -### Pinning to the `v1` releases - -Similarly to the section above describing use of the `v2` branch, if one wants -to avoid any unexpected compatibility pains once `v2` becomes `master`, then -pinning to `v1` is an acceptable option, e.g.: +### Using `v1` releases ``` -$ go get gopkg.in/urfave/cli.v1 +$ go get github.com/urfave/cli ``` -``` go +```go ... import ( - "gopkg.in/urfave/cli.v1" // imports as package "cli" + "github.com/urfave/cli" ) ... ``` -This will pull the latest tagged `v1` release (e.g. `v1.18.1` at the time of writing). ## Getting Started @@ -139,13 +132,17 @@ discovery. So a cli app can be as little as one line of code in `main()`. package main import ( + "log" "os" "github.com/urfave/cli" ) func main() { - cli.NewApp().Run(os.Args) + err := cli.NewApp().Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -160,6 +157,7 @@ package main import ( "fmt" + "log" "os" "github.com/urfave/cli" @@ -174,7 +172,10 @@ func main() { return nil } - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -198,6 +199,7 @@ package main import ( "fmt" + "log" "os" "github.com/urfave/cli" @@ -212,7 +214,10 @@ func main() { return nil } - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -261,6 +266,7 @@ package main import ( "fmt" + "log" "os" "github.com/urfave/cli" @@ -274,7 +280,10 @@ func main() { return nil } - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -290,6 +299,7 @@ package main import ( "fmt" + "log" "os" "github.com/urfave/cli" @@ -319,7 +329,10 @@ func main() { return nil } - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -333,6 +346,7 @@ scanned. 
package main import ( + "log" "os" "fmt" @@ -366,7 +380,10 @@ func main() { return nil } - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -387,6 +404,7 @@ For example this: package main import ( + "log" "os" "github.com/urfave/cli" @@ -402,7 +420,10 @@ func main() { }, } - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -428,6 +449,7 @@ list for the `Name`. e.g. package main import ( + "log" "os" "github.com/urfave/cli" @@ -444,7 +466,10 @@ func main() { }, } - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -468,6 +493,7 @@ For example this: package main import ( + "log" "os" "sort" @@ -511,7 +537,10 @@ func main() { sort.Sort(cli.FlagsByName(app.Flags)) sort.Sort(cli.CommandsByName(app.Commands)) - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -534,6 +563,7 @@ You can also have the default value set from the environment via `EnvVar`. e.g. package main import ( + "log" "os" "github.com/urfave/cli" @@ -551,7 +581,10 @@ func main() { }, } - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -566,6 +599,7 @@ environment variable that resolves is used as the default. package main import ( + "log" "os" "github.com/urfave/cli" @@ -583,10 +617,52 @@ func main() { }, } - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` +#### Values from files + +You can also have the default value set from file via `FilePath`. e.g. + + +``` go +package main + +import ( + "log" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "password, p", + Usage: "password for the mysql database", + FilePath: "/etc/mysql/password", + }, + } + + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } +} +``` + +Note that default values set from file (e.g. `FilePath`) take precedence over +default values set from the environment (e.g. `EnvVar`). + #### Values from alternate input sources (YAML, TOML, and others) There is a separate package altsrc that adds support for getting flag values @@ -594,6 +670,7 @@ from other file input sources. Currently supported input source formats: * YAML +* JSON * TOML In order to get values for a flag from an alternate input source the following @@ -616,9 +693,9 @@ the yaml input source for any flags that are defined on that command. As a note the "load" flag used would also have to be defined on the command flags in order for this code snipped to work. -Currently only the aboved specified formats are supported but developers can -add support for other input sources by implementing the -altsrc.InputSourceContext for their given sources. +Currently only YAML, JSON, and TOML files are supported but developers can add support +for other input sources by implementing the altsrc.InputSourceContext for their +given sources. 
Here is a more complete sample of a command using YAML support: @@ -631,6 +708,7 @@ package notmain import ( "fmt" + "log" "os" "github.com/urfave/cli" @@ -653,7 +731,10 @@ func main() { app.Before = altsrc.InitInputSourceWithContext(flags, altsrc.NewYamlSourceFromFlagFunc("load")) app.Flags = flags - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -679,6 +760,7 @@ package main import ( "fmt" + "log" "os" "github.com/urfave/cli" @@ -731,7 +813,10 @@ func main() { }, } - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -747,6 +832,7 @@ E.g. package main import ( + "log" "os" "github.com/urfave/cli" @@ -769,7 +855,10 @@ func main() { }, } - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -777,7 +866,7 @@ Will include: ``` COMMANDS: - noop + noop Template actions: add @@ -790,11 +879,14 @@ Calling `App.Run` will not automatically call `os.Exit`, which means that by default the exit code will "fall through" to being `0`. An explicit exit code may be set by returning a non-nil error that fulfills `cli.ExitCoder`, *or* a `cli.MultiError` that includes an error that fulfills `cli.ExitCoder`, e.g.: - + ``` go package main import ( + "log" "os" "github.com/urfave/cli" @@ -803,22 +895,95 @@ import ( func main() { app := cli.NewApp() app.Flags = []cli.Flag{ - cli.BoolTFlag{ + cli.BoolFlag{ Name: "ginger-crouton", - Usage: "is it in the soup?", + Usage: "Add ginger croutons to the soup", }, } app.Action = func(ctx *cli.Context) error { if !ctx.Bool("ginger-crouton") { - return cli.NewExitError("it is not in the soup", 86) + return cli.NewExitError("Ginger croutons are not in the soup", 86) } return nil } - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` +### Combining short options + +Traditional use of options using their shortnames look like this: + +``` +$ cmd -s -o -m "Some message" +``` + +Suppose you want users to be able to combine options with their shortnames. This +can be done using the `UseShortOptionHandling` bool in your app configuration, +or for individual commands by attaching it to the command configuration. For +example: + + +``` go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + app.UseShortOptionHandling = true + app.Commands = []cli.Command{ + { + Name: "short", + Usage: "complete a task on the list", + Flags: []cli.Flag{ + cli.BoolFlag{Name: "serve, s"}, + cli.BoolFlag{Name: "option, o"}, + cli.StringFlag{Name: "message, m"}, + }, + Action: func(c *cli.Context) error { + fmt.Println("serve:", c.Bool("serve")) + fmt.Println("option:", c.Bool("option")) + fmt.Println("message:", c.String("message")) + return nil + }, + }, + } + + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } +} +``` + +If your program has any number of bool flags such as `serve` and `option`, and +optionally one non-bool flag `message`, with the short options of `-s`, `-o`, +and `-m` respectively, setting `UseShortOptionHandling` will also support the +following syntax: + +``` +$ cmd -som "Some message" +``` + +If you enable `UseShortOptionHandling`, then you must not use any flags that +have a single leading `-` or this will result in failures. For example, +`-option` can no longer be used. Flags with two leading dashes (such as +`--options`) are still valid. 
+ ### Bash Completion You can enable completion commands by setting the `EnableBashCompletion` @@ -835,6 +1000,7 @@ package main import ( "fmt" + "log" "os" "github.com/urfave/cli" @@ -866,7 +1032,10 @@ func main() { }, } - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -906,6 +1075,7 @@ The default bash completion flag (`--generate-bash-completion`) is defined as package main import ( + "log" "os" "github.com/urfave/cli" @@ -924,7 +1094,10 @@ func main() { Name: "wat", }, } - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -950,6 +1123,7 @@ package main import ( "fmt" + "log" "io" "os" @@ -993,7 +1167,10 @@ VERSION: fmt.Println("Ha HA. I pwnd the help!!1") } - cli.NewApp().Run(os.Args) + err := cli.NewApp().Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -1008,6 +1185,7 @@ setting `cli.HelpFlag`, e.g.: package main import ( + "log" "os" "github.com/urfave/cli" @@ -1020,7 +1198,10 @@ func main() { EnvVar: "SHOW_HALP,HALPPLZ", } - cli.NewApp().Run(os.Args) + err := cli.NewApp().Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -1043,6 +1224,7 @@ setting `cli.VersionFlag`, e.g.: package main import ( + "log" "os" "github.com/urfave/cli" @@ -1057,7 +1239,10 @@ func main() { app := cli.NewApp() app.Name = "partay" app.Version = "19.99.0" - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -1072,6 +1257,7 @@ package main import ( "fmt" + "log" "os" "github.com/urfave/cli" @@ -1089,7 +1275,10 @@ func main() { app := cli.NewApp() app.Name = "partay" app.Version = "19.99.0" - app.Run(os.Args) + err := app.Run(os.Args) + if err != nil { + log.Fatal(err) + } } ``` @@ -1246,6 +1435,7 @@ func main() { cli.Uint64Flag{Name: "bigage"}, } app.EnableBashCompletion = true + app.UseShortOptionHandling = true app.HideHelp = false app.HideVersion = false app.BashComplete = func(c *cli.Context) { @@ -1351,7 +1541,7 @@ func main() { ec := cli.NewExitError("ohwell", 86) fmt.Fprintf(c.App.Writer, "%d", ec.ExitCode()) fmt.Printf("made it!\n") - return ec + return nil } if os.Getenv("HEXY") != "" { @@ -1365,7 +1555,9 @@ func main() { "whatever-values": 19.99, } - app.Run(os.Args) + + // ignore error so we don't exit non-zero and break gfmrun README example tests + _ = app.Run(os.Args) } func wopAction(c *cli.Context) error { @@ -1376,16 +1568,4 @@ func wopAction(c *cli.Context) error { ## Contribution Guidelines -Feel free to put up a pull request to fix a bug or maybe add a feature. I will -give it a code review and make sure that it does not break backwards -compatibility. If I or any other collaborators agree that it is in line with -the vision of the project, we will work with you to get the code into -a mergeable state and merge it into the master branch. - -If you have contributed something significant to the project, we will most -likely add you as a collaborator. As a collaborator you are given the ability -to merge others pull requests. It is very important that new code does not -break existing code, so be careful about what code you do choose to merge. - -If you feel like you have contributed to the project but have not yet been -added as a collaborator, we probably forgot to add you, please open an issue. 
+See [./CONTRIBUTING.md](./CONTRIBUTING.md) diff --git a/vendor/github.com/urfave/cli/app.go b/vendor/github.com/urfave/cli/app.go index 60599b04657e..76e869d37aad 100644 --- a/vendor/github.com/urfave/cli/app.go +++ b/vendor/github.com/urfave/cli/app.go @@ -1,9 +1,9 @@ package cli import ( + "flag" "fmt" "io" - "io/ioutil" "os" "path/filepath" "sort" @@ -11,9 +11,10 @@ import ( ) var ( - changeLogURL = "https://github.com/urfave/cli/blob/master/CHANGELOG.md" - appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL) - runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL) + changeLogURL = "https://github.com/urfave/cli/blob/master/CHANGELOG.md" + appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL) + // unused variable. commented for now. will remove in future if agreed upon by everyone + //runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL) contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you." @@ -94,6 +95,10 @@ type App struct { // cli.go uses text/template to render templates. You can // render custom help text by setting this variable. CustomAppHelpTemplate string + // Boolean to enable short-option handling so user can combine several + // single-character bool arguements into one + // i.e. foobar -o -v -> foobar -ov + UseShortOptionHandling bool didSetup bool } @@ -138,7 +143,7 @@ func (a *App) Setup() { a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) } - newCmds := []Command{} + var newCmds []Command for _, c := range a.Commands { if c.HelpName == "" { c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) @@ -173,6 +178,14 @@ func (a *App) Setup() { } } +func (a *App) newFlagSet() (*flag.FlagSet, error) { + return flagSet(a.Name, a.Flags) +} + +func (a *App) useShortOptionHandling() bool { + return a.UseShortOptionHandling +} + // Run is the entry point to the cli app. 
Parses the arguments slice and routes // to the proper flag/args combination func (a *App) Run(arguments []string) (err error) { @@ -186,19 +199,17 @@ func (a *App) Run(arguments []string) (err error) { // always appends the completion flag at the end of the command shellComplete, arguments := checkShellCompleteFlag(a, arguments) - // parse flags - set, err := flagSet(a.Name, a.Flags) + _, err = a.newFlagSet() if err != nil { return err } - set.SetOutput(ioutil.Discard) - err = set.Parse(arguments[1:]) + set, err := parseIter(a, arguments[1:]) nerr := normalizeFlags(a.Flags, set) context := NewContext(a, set, nil) if nerr != nil { - fmt.Fprintln(a.Writer, nerr) - ShowAppHelp(context) + _, _ = fmt.Fprintln(a.Writer, nerr) + _ = ShowAppHelp(context) return nerr } context.shellComplete = shellComplete @@ -213,13 +224,13 @@ func (a *App) Run(arguments []string) (err error) { a.handleExitCoder(context, err) return err } - fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) - ShowAppHelp(context) + _, _ = fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) + _ = ShowAppHelp(context) return err } if !a.HideHelp && checkHelp(context) { - ShowAppHelp(context) + _ = ShowAppHelp(context) return nil } @@ -228,6 +239,12 @@ func (a *App) Run(arguments []string) (err error) { return nil } + cerr := checkRequiredFlags(a.Flags, context) + if cerr != nil { + _ = ShowAppHelp(context) + return cerr + } + if a.After != nil { defer func() { if afterErr := a.After(context); afterErr != nil { @@ -243,8 +260,8 @@ func (a *App) Run(arguments []string) (err error) { if a.Before != nil { beforeErr := a.Before(context) if beforeErr != nil { - fmt.Fprintf(a.Writer, "%v\n\n", beforeErr) - ShowAppHelp(context) + _, _ = fmt.Fprintf(a.Writer, "%v\n\n", beforeErr) + _ = ShowAppHelp(context) a.handleExitCoder(context, beforeErr) err = beforeErr return err @@ -278,7 +295,7 @@ func (a *App) Run(arguments []string) (err error) { // code in the cli.ExitCoder func (a *App) RunAndExitOnError() { if err := a.Run(os.Args); err != nil { - fmt.Fprintln(a.errWriter(), err) + _, _ = fmt.Fprintln(a.errWriter(), err) OsExiter(1) } } @@ -305,24 +322,22 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { } a.Commands = newCmds - // parse flags - set, err := flagSet(a.Name, a.Flags) + _, err = a.newFlagSet() if err != nil { return err } - set.SetOutput(ioutil.Discard) - err = set.Parse(ctx.Args().Tail()) + set, err := parseIter(a, ctx.Args().Tail()) nerr := normalizeFlags(a.Flags, set) context := NewContext(a, set, ctx) if nerr != nil { - fmt.Fprintln(a.Writer, nerr) - fmt.Fprintln(a.Writer) + _, _ = fmt.Fprintln(a.Writer, nerr) + _, _ = fmt.Fprintln(a.Writer) if len(a.Commands) > 0 { - ShowSubcommandHelp(context) + _ = ShowSubcommandHelp(context) } else { - ShowCommandHelp(ctx, context.Args().First()) + _ = ShowCommandHelp(ctx, context.Args().First()) } return nerr } @@ -337,8 +352,8 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { a.handleExitCoder(context, err) return err } - fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) - ShowSubcommandHelp(context) + _, _ = fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) + _ = ShowSubcommandHelp(context) return err } @@ -352,6 +367,12 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { } } + cerr := checkRequiredFlags(a.Flags, context) + if cerr != nil { + _ = ShowSubcommandHelp(context) + return cerr + } + if a.After != nil { defer func() { afterErr := a.After(context) @@ -428,7 +449,7 @@ func (a 
*App) VisibleCategories() []*CommandCategory { // VisibleCommands returns a slice of the Commands with Hidden=false func (a *App) VisibleCommands() []Command { - ret := []Command{} + var ret []Command for _, command := range a.Commands { if !command.Hidden { ret = append(ret, command) @@ -453,7 +474,6 @@ func (a *App) hasFlag(flag Flag) bool { } func (a *App) errWriter() io.Writer { - // When the app ErrWriter is nil use the package level one. if a.ErrWriter == nil { return ErrWriter @@ -496,11 +516,12 @@ func (a Author) String() string { // it's an ActionFunc or a func with the legacy signature for Action, the func // is run! func HandleAction(action interface{}, context *Context) (err error) { - if a, ok := action.(ActionFunc); ok { + switch a := action.(type) { + case ActionFunc: return a(context) - } else if a, ok := action.(func(*Context) error); ok { + case func(*Context) error: return a(context) - } else if a, ok := action.(func(*Context)); ok { // deprecated function signature + case func(*Context): // deprecated function signature a(context) return nil } diff --git a/vendor/github.com/urfave/cli/category.go b/vendor/github.com/urfave/cli/category.go index 1a6055023e72..bf3c73c55ee2 100644 --- a/vendor/github.com/urfave/cli/category.go +++ b/vendor/github.com/urfave/cli/category.go @@ -10,7 +10,7 @@ type CommandCategory struct { } func (c CommandCategories) Less(i, j int) bool { - return c[i].Name < c[j].Name + return lexicographicLess(c[i].Name, c[j].Name) } func (c CommandCategories) Len() int { diff --git a/vendor/github.com/urfave/cli/cli.go b/vendor/github.com/urfave/cli/cli.go index 90c07eb8ef94..4bd250839244 100644 --- a/vendor/github.com/urfave/cli/cli.go +++ b/vendor/github.com/urfave/cli/cli.go @@ -19,4 +19,4 @@ // } package cli -//go:generate python ./generate-flag-types cli -i flag-types.json -o flag_generated.go +//go:generate go run flag-gen/main.go flag-gen/assets_vfsdata.go diff --git a/vendor/github.com/urfave/cli/command.go b/vendor/github.com/urfave/cli/command.go index 502fc9f30176..44a90de6b731 100644 --- a/vendor/github.com/urfave/cli/command.go +++ b/vendor/github.com/urfave/cli/command.go @@ -1,8 +1,8 @@ package cli import ( + "flag" "fmt" - "io/ioutil" "sort" "strings" ) @@ -55,6 +55,10 @@ type Command struct { HideHelp bool // Boolean to hide this command from help or completion Hidden bool + // Boolean to enable short-option handling so user can combine several + // single-character bool arguments into one + // i.e. foobar -o -v -> foobar -ov + UseShortOptionHandling bool // Full name of command for help, defaults to full command name, including parent commands. HelpName string @@ -73,7 +77,7 @@ func (c CommandsByName) Len() int { } func (c CommandsByName) Less(i, j int) bool { - return c[i].Name < c[j].Name + return lexicographicLess(c[i].Name, c[j].Name) } func (c CommandsByName) Swap(i, j int) { @@ -106,57 +110,11 @@ func (c Command) Run(ctx *Context) (err error) { ) } - set, err := flagSet(c.Name, c.Flags) - if err != nil { - return err - } - set.SetOutput(ioutil.Discard) - - if c.SkipFlagParsing { - err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...)) - } else if !c.SkipArgReorder { - firstFlagIndex := -1 - terminatorIndex := -1 - for index, arg := range ctx.Args() { - if arg == "--" { - terminatorIndex = index - break - } else if arg == "-" { - // Do nothing. A dash alone is not really a flag. 
- continue - } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 { - firstFlagIndex = index - } - } - - if firstFlagIndex > -1 { - args := ctx.Args() - regularArgs := make([]string, len(args[1:firstFlagIndex])) - copy(regularArgs, args[1:firstFlagIndex]) - - var flagArgs []string - if terminatorIndex > -1 { - flagArgs = args[firstFlagIndex:terminatorIndex] - regularArgs = append(regularArgs, args[terminatorIndex:]...) - } else { - flagArgs = args[firstFlagIndex:] - } - - err = set.Parse(append(flagArgs, regularArgs...)) - } else { - err = set.Parse(ctx.Args().Tail()) - } - } else { - err = set.Parse(ctx.Args().Tail()) + if ctx.App.UseShortOptionHandling { + c.UseShortOptionHandling = true } - nerr := normalizeFlags(c.Flags, set) - if nerr != nil { - fmt.Fprintln(ctx.App.Writer, nerr) - fmt.Fprintln(ctx.App.Writer) - ShowCommandHelp(ctx, c.Name) - return nerr - } + set, err := c.parseFlags(ctx.Args().Tail()) context := NewContext(ctx.App, set, ctx) context.Command = c @@ -170,9 +128,9 @@ func (c Command) Run(ctx *Context) (err error) { context.App.handleExitCoder(context, err) return err } - fmt.Fprintln(context.App.Writer, "Incorrect Usage:", err.Error()) - fmt.Fprintln(context.App.Writer) - ShowCommandHelp(context, c.Name) + _, _ = fmt.Fprintln(context.App.Writer, "Incorrect Usage:", err.Error()) + _, _ = fmt.Fprintln(context.App.Writer) + _ = ShowCommandHelp(context, c.Name) return err } @@ -180,6 +138,12 @@ func (c Command) Run(ctx *Context) (err error) { return nil } + cerr := checkRequiredFlags(c.Flags, context) + if cerr != nil { + _ = ShowCommandHelp(context, c.Name) + return cerr + } + if c.After != nil { defer func() { afterErr := c.After(context) @@ -197,7 +161,7 @@ func (c Command) Run(ctx *Context) (err error) { if c.Before != nil { err = c.Before(context) if err != nil { - ShowCommandHelp(context, c.Name) + _ = ShowCommandHelp(context, c.Name) context.App.handleExitCoder(context, err) return err } @@ -215,6 +179,71 @@ func (c Command) Run(ctx *Context) (err error) { return err } +func (c *Command) parseFlags(args Args) (*flag.FlagSet, error) { + if c.SkipFlagParsing { + set, err := c.newFlagSet() + if err != nil { + return nil, err + } + + return set, set.Parse(append([]string{"--"}, args...)) + } + + if !c.SkipArgReorder { + args = reorderArgs(args) + } + + set, err := parseIter(c, args) + if err != nil { + return nil, err + } + + err = normalizeFlags(c.Flags, set) + if err != nil { + return nil, err + } + + return set, nil +} + +func (c *Command) newFlagSet() (*flag.FlagSet, error) { + return flagSet(c.Name, c.Flags) +} + +func (c *Command) useShortOptionHandling() bool { + return c.UseShortOptionHandling +} + +// reorderArgs moves all flags before arguments as this is what flag expects +func reorderArgs(args []string) []string { + var nonflags, flags []string + + readFlagValue := false + for i, arg := range args { + if arg == "--" { + nonflags = append(nonflags, args[i:]...) + break + } + + if readFlagValue && !strings.HasPrefix(arg, "-") && !strings.HasPrefix(arg, "--") { + readFlagValue = false + flags = append(flags, arg) + continue + } + readFlagValue = false + + if arg != "-" && strings.HasPrefix(arg, "-") { + flags = append(flags, arg) + + readFlagValue = !strings.Contains(arg, "=") + } else { + nonflags = append(nonflags, arg) + } + } + + return append(flags, nonflags...) +} + // Names returns the names including short names and aliases. 
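// Illustrative usage sketch, not part of this patch: the new
// UseShortOptionHandling field (added to App and Command above) lets
// several single-character bool flags be combined, e.g. "greet short -ov"
// instead of "greet short -o -v". The app and command names here are made up.
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "greet"
	app.UseShortOptionHandling = true // propagated to commands in Command.Run
	app.Commands = []cli.Command{{
		Name: "short",
		Flags: []cli.Flag{
			cli.BoolFlag{Name: "o"},
			cli.BoolFlag{Name: "v"},
		},
		Action: func(c *cli.Context) error {
			// "greet short -ov" sets both flags
			fmt.Println("o:", c.Bool("o"), "v:", c.Bool("v"))
			return nil
		},
	}}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}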
func (c Command) Names() []string { names := []string{c.Name} @@ -239,6 +268,7 @@ func (c Command) HasName(name string) bool { func (c Command) startApp(ctx *Context) error { app := NewApp() app.Metadata = ctx.App.Metadata + app.ExitErrHandler = ctx.App.ExitErrHandler // set the name and usage app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) if c.HelpName == "" { @@ -267,6 +297,7 @@ func (c Command) startApp(ctx *Context) error { app.Email = ctx.App.Email app.Writer = ctx.App.Writer app.ErrWriter = ctx.App.ErrWriter + app.UseShortOptionHandling = ctx.App.UseShortOptionHandling app.categories = CommandCategories{} for _, command := range c.Subcommands { diff --git a/vendor/github.com/urfave/cli/context.go b/vendor/github.com/urfave/cli/context.go index 012b9b5869a6..db7cd6977e0a 100644 --- a/vendor/github.com/urfave/cli/context.go +++ b/vendor/github.com/urfave/cli/context.go @@ -3,6 +3,8 @@ package cli import ( "errors" "flag" + "fmt" + "os" "reflect" "strings" "syscall" @@ -93,18 +95,26 @@ func (c *Context) IsSet(name string) bool { val = val.Elem() } - envVarValue := val.FieldByName("EnvVar") - if !envVarValue.IsValid() { - return + filePathValue := val.FieldByName("FilePath") + if filePathValue.IsValid() { + eachName(filePathValue.String(), func(filePath string) { + if _, err := os.Stat(filePath); err == nil { + c.setFlags[name] = true + return + } + }) } - eachName(envVarValue.String(), func(envVar string) { - envVar = strings.TrimSpace(envVar) - if _, ok := syscall.Getenv(envVar); ok { - c.setFlags[name] = true - return - } - }) + envVarValue := val.FieldByName("EnvVar") + if envVarValue.IsValid() { + eachName(envVarValue.String(), func(envVar string) { + envVar = strings.TrimSpace(envVar) + if _, ok := syscall.Getenv(envVar); ok { + c.setFlags[name] = true + return + } + }) + } }) } } @@ -129,8 +139,8 @@ func (c *Context) GlobalIsSet(name string) bool { // FlagNames returns a slice of flag names used in this context. func (c *Context) FlagNames() (names []string) { - for _, flag := range c.Command.Flags { - name := strings.Split(flag.GetName(), ",")[0] + for _, f := range c.Command.Flags { + name := strings.Split(f.GetName(), ",")[0] if name == "help" { continue } @@ -141,8 +151,8 @@ func (c *Context) FlagNames() (names []string) { // GlobalFlagNames returns a slice of global flag names used by the app. 
func (c *Context) GlobalFlagNames() (names []string) { - for _, flag := range c.App.Flags { - name := strings.Split(flag.GetName(), ",")[0] + for _, f := range c.App.Flags { + name := strings.Split(f.GetName(), ",")[0] if name == "help" || name == "version" { continue } @@ -240,7 +250,7 @@ func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { switch ff.Value.(type) { case *StringSlice: default: - set.Set(name, ff.Value.String()) + _ = set.Set(name, ff.Value.String()) } } @@ -276,3 +286,43 @@ func normalizeFlags(flags []Flag, set *flag.FlagSet) error { } return nil } + +type requiredFlagsErr interface { + error + getMissingFlags() []string +} + +type errRequiredFlags struct { + missingFlags []string +} + +func (e *errRequiredFlags) Error() string { + numberOfMissingFlags := len(e.missingFlags) + if numberOfMissingFlags == 1 { + return fmt.Sprintf("Required flag %q not set", e.missingFlags[0]) + } + joinedMissingFlags := strings.Join(e.missingFlags, ", ") + return fmt.Sprintf("Required flags %q not set", joinedMissingFlags) +} + +func (e *errRequiredFlags) getMissingFlags() []string { + return e.missingFlags +} + +func checkRequiredFlags(flags []Flag, context *Context) requiredFlagsErr { + var missingFlags []string + for _, f := range flags { + if rf, ok := f.(RequiredFlag); ok && rf.IsRequired() { + key := strings.Split(f.GetName(), ",")[0] + if !context.IsSet(key) { + missingFlags = append(missingFlags, key) + } + } + } + + if len(missingFlags) != 0 { + return &errRequiredFlags{missingFlags: missingFlags} + } + + return nil +} diff --git a/vendor/github.com/urfave/cli/docs.go b/vendor/github.com/urfave/cli/docs.go new file mode 100644 index 000000000000..5b9456612876 --- /dev/null +++ b/vendor/github.com/urfave/cli/docs.go @@ -0,0 +1,148 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "sort" + "strings" + "text/template" + + "github.com/cpuguy83/go-md2man/v2/md2man" +) + +// ToMarkdown creates a markdown string for the `*App` +// The function errors if either parsing or writing of the string fails. +func (a *App) ToMarkdown() (string, error) { + var w bytes.Buffer + if err := a.writeDocTemplate(&w); err != nil { + return "", err + } + return w.String(), nil +} + +// ToMan creates a man page string for the `*App` +// The function errors if either parsing or writing of the string fails. 
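// Illustrative usage sketch, not part of this patch: checkRequiredFlags
// (added to context.go above) rejects a run when a flag marked Required
// has not been set, printing help and returning an error such as
// `Required flag "config" not set`. StringFlag is assumed to gain the
// same Required field as the flag types shown elsewhere in this patch.
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "serve"
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:     "config, c",
			Usage:    "path to the configuration file",
			Required: true,
		},
	}
	app.Action = func(c *cli.Context) error {
		fmt.Println("loading", c.String("config"))
		return nil
	}
	// Running "serve" without --config prints help and fails with the
	// required-flag error built by errRequiredFlags.
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}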
+func (a *App) ToMan() (string, error) { + var w bytes.Buffer + if err := a.writeDocTemplate(&w); err != nil { + return "", err + } + man := md2man.Render(w.Bytes()) + return string(man), nil +} + +type cliTemplate struct { + App *App + Commands []string + GlobalArgs []string + SynopsisArgs []string +} + +func (a *App) writeDocTemplate(w io.Writer) error { + const name = "cli" + t, err := template.New(name).Parse(MarkdownDocTemplate) + if err != nil { + return err + } + return t.ExecuteTemplate(w, name, &cliTemplate{ + App: a, + Commands: prepareCommands(a.Commands, 0), + GlobalArgs: prepareArgsWithValues(a.Flags), + SynopsisArgs: prepareArgsSynopsis(a.Flags), + }) +} + +func prepareCommands(commands []Command, level int) []string { + coms := []string{} + for i := range commands { + command := &commands[i] + if command.Hidden { + continue + } + usage := "" + if command.Usage != "" { + usage = command.Usage + } + + prepared := fmt.Sprintf("%s %s\n\n%s\n", + strings.Repeat("#", level+2), + strings.Join(command.Names(), ", "), + usage, + ) + + flags := prepareArgsWithValues(command.Flags) + if len(flags) > 0 { + prepared += fmt.Sprintf("\n%s", strings.Join(flags, "\n")) + } + + coms = append(coms, prepared) + + // recursevly iterate subcommands + if len(command.Subcommands) > 0 { + coms = append( + coms, + prepareCommands(command.Subcommands, level+1)..., + ) + } + } + + return coms +} + +func prepareArgsWithValues(flags []Flag) []string { + return prepareFlags(flags, ", ", "**", "**", `""`, true) +} + +func prepareArgsSynopsis(flags []Flag) []string { + return prepareFlags(flags, "|", "[", "]", "[value]", false) +} + +func prepareFlags( + flags []Flag, + sep, opener, closer, value string, + addDetails bool, +) []string { + args := []string{} + for _, f := range flags { + flag, ok := f.(DocGenerationFlag) + if !ok { + continue + } + modifiedArg := opener + for _, s := range strings.Split(flag.GetName(), ",") { + trimmed := strings.TrimSpace(s) + if len(modifiedArg) > len(opener) { + modifiedArg += sep + } + if len(trimmed) > 1 { + modifiedArg += fmt.Sprintf("--%s", trimmed) + } else { + modifiedArg += fmt.Sprintf("-%s", trimmed) + } + } + modifiedArg += closer + if flag.TakesValue() { + modifiedArg += fmt.Sprintf("=%s", value) + } + + if addDetails { + modifiedArg += flagDetails(flag) + } + + args = append(args, modifiedArg+"\n") + + } + sort.Strings(args) + return args +} + +// flagDetails returns a string containing the flags metadata +func flagDetails(flag DocGenerationFlag) string { + description := flag.GetUsage() + value := flag.GetValue() + if value != "" { + description += " (default: " + value + ")" + } + return ": " + description +} diff --git a/vendor/github.com/urfave/cli/fish.go b/vendor/github.com/urfave/cli/fish.go new file mode 100644 index 000000000000..cf183af611f1 --- /dev/null +++ b/vendor/github.com/urfave/cli/fish.go @@ -0,0 +1,194 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/template" +) + +// ToFishCompletion creates a fish completion string for the `*App` +// The function errors if either parsing or writing of the string fails. 
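// Illustrative usage sketch, not part of this patch: the new docs.go
// helpers render an app's commands and flags as Markdown or as a man
// page (via the vendored go-md2man). The app definition here is made up.
package main

import (
	"fmt"
	"log"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "greet"
	app.Usage = "say hello"
	app.Commands = []cli.Command{{Name: "hello", Usage: "greet someone"}}

	md, err := app.ToMarkdown()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(md)

	man, err := app.ToMan()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(man)
}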
+func (a *App) ToFishCompletion() (string, error) { + var w bytes.Buffer + if err := a.writeFishCompletionTemplate(&w); err != nil { + return "", err + } + return w.String(), nil +} + +type fishCompletionTemplate struct { + App *App + Completions []string + AllCommands []string +} + +func (a *App) writeFishCompletionTemplate(w io.Writer) error { + const name = "cli" + t, err := template.New(name).Parse(FishCompletionTemplate) + if err != nil { + return err + } + allCommands := []string{} + + // Add global flags + completions := a.prepareFishFlags(a.VisibleFlags(), allCommands) + + // Add help flag + if !a.HideHelp { + completions = append( + completions, + a.prepareFishFlags([]Flag{HelpFlag}, allCommands)..., + ) + } + + // Add version flag + if !a.HideVersion { + completions = append( + completions, + a.prepareFishFlags([]Flag{VersionFlag}, allCommands)..., + ) + } + + // Add commands and their flags + completions = append( + completions, + a.prepareFishCommands(a.VisibleCommands(), &allCommands, []string{})..., + ) + + return t.ExecuteTemplate(w, name, &fishCompletionTemplate{ + App: a, + Completions: completions, + AllCommands: allCommands, + }) +} + +func (a *App) prepareFishCommands(commands []Command, allCommands *[]string, previousCommands []string) []string { + completions := []string{} + for i := range commands { + command := &commands[i] + + if command.Hidden { + continue + } + + var completion strings.Builder + completion.WriteString(fmt.Sprintf( + "complete -r -c %s -n '%s' -a '%s'", + a.Name, + a.fishSubcommandHelper(previousCommands), + strings.Join(command.Names(), " "), + )) + + if command.Usage != "" { + completion.WriteString(fmt.Sprintf(" -d '%s'", + escapeSingleQuotes(command.Usage))) + } + + if !command.HideHelp { + completions = append( + completions, + a.prepareFishFlags([]Flag{HelpFlag}, command.Names())..., + ) + } + + *allCommands = append(*allCommands, command.Names()...) 
+ completions = append(completions, completion.String()) + completions = append( + completions, + a.prepareFishFlags(command.Flags, command.Names())..., + ) + + // recursevly iterate subcommands + if len(command.Subcommands) > 0 { + completions = append( + completions, + a.prepareFishCommands( + command.Subcommands, allCommands, command.Names(), + )..., + ) + } + } + + return completions +} + +func (a *App) prepareFishFlags(flags []Flag, previousCommands []string) []string { + completions := []string{} + for _, f := range flags { + flag, ok := f.(DocGenerationFlag) + if !ok { + continue + } + + completion := &strings.Builder{} + completion.WriteString(fmt.Sprintf( + "complete -c %s -n '%s'", + a.Name, + a.fishSubcommandHelper(previousCommands), + )) + + fishAddFileFlag(f, completion) + + for idx, opt := range strings.Split(flag.GetName(), ",") { + if idx == 0 { + completion.WriteString(fmt.Sprintf( + " -l %s", strings.TrimSpace(opt), + )) + } else { + completion.WriteString(fmt.Sprintf( + " -s %s", strings.TrimSpace(opt), + )) + + } + } + + if flag.TakesValue() { + completion.WriteString(" -r") + } + + if flag.GetUsage() != "" { + completion.WriteString(fmt.Sprintf(" -d '%s'", + escapeSingleQuotes(flag.GetUsage()))) + } + + completions = append(completions, completion.String()) + } + + return completions +} + +func fishAddFileFlag(flag Flag, completion *strings.Builder) { + switch f := flag.(type) { + case GenericFlag: + if f.TakesFile { + return + } + case StringFlag: + if f.TakesFile { + return + } + case StringSliceFlag: + if f.TakesFile { + return + } + } + completion.WriteString(" -f") +} + +func (a *App) fishSubcommandHelper(allCommands []string) string { + fishHelper := fmt.Sprintf("__fish_%s_no_subcommand", a.Name) + if len(allCommands) > 0 { + fishHelper = fmt.Sprintf( + "__fish_seen_subcommand_from %s", + strings.Join(allCommands, " "), + ) + } + return fishHelper + +} + +func escapeSingleQuotes(input string) string { + return strings.Replace(input, `'`, `\'`, -1) +} diff --git a/vendor/github.com/urfave/cli/flag.go b/vendor/github.com/urfave/cli/flag.go index b17f5b9b6f76..3c052707781e 100644 --- a/vendor/github.com/urfave/cli/flag.go +++ b/vendor/github.com/urfave/cli/flag.go @@ -3,12 +3,12 @@ package cli import ( "flag" "fmt" + "io/ioutil" "reflect" "runtime" "strconv" "strings" "syscall" - "time" ) const defaultPlaceholder = "value" @@ -45,6 +45,10 @@ var FlagNamePrefixer FlagNamePrefixFunc = prefixedNames // details. This is used by the default FlagStringer. var FlagEnvHinter FlagEnvHintFunc = withEnvHint +// FlagFileHinter annotates flag help message with the environment variable +// details. This is used by the default FlagStringer. +var FlagFileHinter FlagFileHintFunc = withFileHint + // FlagsByName is a slice of Flag. 
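// Illustrative usage sketch, not part of this patch: fish.go above adds
// generation of a fish shell completion script for an app. The output
// would typically be written to a completions file such as
// ~/.config/fish/completions/greet.fish (hypothetical path).
package main

import (
	"fmt"
	"log"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "greet"
	app.Commands = []cli.Command{{Name: "hello", Usage: "greet someone"}}

	script, err := app.ToFishCompletion()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(script)
}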
type FlagsByName []Flag @@ -53,7 +57,7 @@ func (f FlagsByName) Len() int { } func (f FlagsByName) Less(i, j int) bool { - return f[i].GetName() < f[j].GetName() + return lexicographicLess(f[i].GetName(), f[j].GetName()) } func (f FlagsByName) Swap(i, j int) { @@ -70,6 +74,29 @@ type Flag interface { GetName() string } +// RequiredFlag is an interface that allows us to mark flags as required +// it allows flags required flags to be backwards compatible with the Flag interface +type RequiredFlag interface { + Flag + + IsRequired() bool +} + +// DocGenerationFlag is an interface that allows documentation generation for the flag +type DocGenerationFlag interface { + Flag + + // TakesValue returns true of the flag takes a value, otherwise false + TakesValue() bool + + // GetUsage returns the usage string for the flag + GetUsage() string + + // GetValue returns the flags value as string representation and an empty + // string if the flag takes no value at all. + GetValue() string +} + // errorableFlag is an interface that allows us to return errors during apply // it allows flags defined in this library to return errors in a fashion backwards compatible // TODO remove in v2 and modify the existing Flag interface to return errors @@ -92,6 +119,7 @@ func flagSet(name string, flags []Flag) (*flag.FlagSet, error) { f.Apply(set) } } + set.SetOutput(ioutil.Discard) return set, nil } @@ -103,544 +131,12 @@ func eachName(longName string, fn func(string)) { } } -// Generic is a generic parseable type identified by a specific flag -type Generic interface { - Set(value string) error - String() string -} - -// Apply takes the flagset and calls Set on the generic flag with the value -// provided by the user for parsing by the flag -// Ignores parsing errors -func (f GenericFlag) Apply(set *flag.FlagSet) { - f.ApplyWithError(set) -} - -// ApplyWithError takes the flagset and calls Set on the generic flag with the value -// provided by the user for parsing by the flag -func (f GenericFlag) ApplyWithError(set *flag.FlagSet) error { - val := f.Value - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal, ok := syscall.Getenv(envVar); ok { - if err := val.Set(envVal); err != nil { - return fmt.Errorf("could not parse %s as value for flag %s: %s", envVal, f.Name, err) - } - break - } - } - } - - eachName(f.Name, func(name string) { - set.Var(f.Value, name, f.Usage) - }) - - return nil -} - -// StringSlice is an opaque type for []string to satisfy flag.Value and flag.Getter -type StringSlice []string - -// Set appends the string value to the list of values -func (f *StringSlice) Set(value string) error { - *f = append(*f, value) - return nil -} - -// String returns a readable representation of this value (for usage defaults) -func (f *StringSlice) String() string { - return fmt.Sprintf("%s", *f) -} - -// Value returns the slice of strings set by this flag -func (f *StringSlice) Value() []string { - return *f -} - -// Get returns the slice of strings set by this flag -func (f *StringSlice) Get() interface{} { - return *f -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f StringSliceFlag) Apply(set *flag.FlagSet) { - f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f StringSliceFlag) ApplyWithError(set *flag.FlagSet) error { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if 
envVal, ok := syscall.Getenv(envVar); ok { - newVal := &StringSlice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - if err := newVal.Set(s); err != nil { - return fmt.Errorf("could not parse %s as string value for flag %s: %s", envVal, f.Name, err) - } - } - f.Value = newVal - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Value == nil { - f.Value = &StringSlice{} - } - set.Var(f.Value, name, f.Usage) - }) - - return nil -} - -// IntSlice is an opaque type for []int to satisfy flag.Value and flag.Getter -type IntSlice []int - -// Set parses the value into an integer and appends it to the list of values -func (f *IntSlice) Set(value string) error { - tmp, err := strconv.Atoi(value) - if err != nil { - return err - } - *f = append(*f, tmp) - return nil -} - -// String returns a readable representation of this value (for usage defaults) -func (f *IntSlice) String() string { - return fmt.Sprintf("%#v", *f) -} - -// Value returns the slice of ints set by this flag -func (f *IntSlice) Value() []int { - return *f -} - -// Get returns the slice of ints set by this flag -func (f *IntSlice) Get() interface{} { - return *f -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f IntSliceFlag) Apply(set *flag.FlagSet) { - f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f IntSliceFlag) ApplyWithError(set *flag.FlagSet) error { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal, ok := syscall.Getenv(envVar); ok { - newVal := &IntSlice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - if err := newVal.Set(s); err != nil { - return fmt.Errorf("could not parse %s as int slice value for flag %s: %s", envVal, f.Name, err) - } - } - f.Value = newVal - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Value == nil { - f.Value = &IntSlice{} - } - set.Var(f.Value, name, f.Usage) - }) - - return nil -} - -// Int64Slice is an opaque type for []int to satisfy flag.Value and flag.Getter -type Int64Slice []int64 - -// Set parses the value into an integer and appends it to the list of values -func (f *Int64Slice) Set(value string) error { - tmp, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return err - } - *f = append(*f, tmp) - return nil -} - -// String returns a readable representation of this value (for usage defaults) -func (f *Int64Slice) String() string { - return fmt.Sprintf("%#v", *f) -} - -// Value returns the slice of ints set by this flag -func (f *Int64Slice) Value() []int64 { - return *f -} - -// Get returns the slice of ints set by this flag -func (f *Int64Slice) Get() interface{} { - return *f -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f Int64SliceFlag) Apply(set *flag.FlagSet) { - f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f Int64SliceFlag) ApplyWithError(set *flag.FlagSet) error { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal, ok := syscall.Getenv(envVar); ok { - newVal := &Int64Slice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - if err := newVal.Set(s); err != nil { - return fmt.Errorf("could not parse %s as int64 slice value for flag %s: %s", envVal, f.Name, err) - } - } - f.Value = newVal - 
break - } - } - } - - eachName(f.Name, func(name string) { - if f.Value == nil { - f.Value = &Int64Slice{} - } - set.Var(f.Value, name, f.Usage) - }) - return nil -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f BoolFlag) Apply(set *flag.FlagSet) { - f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f BoolFlag) ApplyWithError(set *flag.FlagSet) error { - val := false - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal, ok := syscall.Getenv(envVar); ok { - if envVal == "" { - val = false - break - } - - envValBool, err := strconv.ParseBool(envVal) - if err != nil { - return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err) - } - - val = envValBool - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.BoolVar(f.Destination, name, val, f.Usage) - return - } - set.Bool(name, val, f.Usage) - }) - - return nil -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f BoolTFlag) Apply(set *flag.FlagSet) { - f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f BoolTFlag) ApplyWithError(set *flag.FlagSet) error { - val := true - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal, ok := syscall.Getenv(envVar); ok { - if envVal == "" { - val = false - break - } - - envValBool, err := strconv.ParseBool(envVal) - if err != nil { - return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err) - } - - val = envValBool - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.BoolVar(f.Destination, name, val, f.Usage) - return - } - set.Bool(name, val, f.Usage) - }) - - return nil -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f StringFlag) Apply(set *flag.FlagSet) { - f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f StringFlag) ApplyWithError(set *flag.FlagSet) error { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal, ok := syscall.Getenv(envVar); ok { - f.Value = envVal - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.StringVar(f.Destination, name, f.Value, f.Usage) - return - } - set.String(name, f.Value, f.Usage) - }) - - return nil -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f IntFlag) Apply(set *flag.FlagSet) { - f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f IntFlag) ApplyWithError(set *flag.FlagSet) error { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal, ok := syscall.Getenv(envVar); ok { - envValInt, err := strconv.ParseInt(envVal, 0, 64) - if err != nil { - return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) - } - f.Value = int(envValInt) - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.IntVar(f.Destination, name, f.Value, f.Usage) - return - } - set.Int(name, f.Value, f.Usage) - }) - - return nil -} - -// Apply 
populates the flag given the flag set and environment -// Ignores errors -func (f Int64Flag) Apply(set *flag.FlagSet) { - f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f Int64Flag) ApplyWithError(set *flag.FlagSet) error { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal, ok := syscall.Getenv(envVar); ok { - envValInt, err := strconv.ParseInt(envVal, 0, 64) - if err != nil { - return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) - } - - f.Value = envValInt - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.Int64Var(f.Destination, name, f.Value, f.Usage) - return - } - set.Int64(name, f.Value, f.Usage) - }) - - return nil -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f UintFlag) Apply(set *flag.FlagSet) { - f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f UintFlag) ApplyWithError(set *flag.FlagSet) error { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal, ok := syscall.Getenv(envVar); ok { - envValInt, err := strconv.ParseUint(envVal, 0, 64) - if err != nil { - return fmt.Errorf("could not parse %s as uint value for flag %s: %s", envVal, f.Name, err) - } - - f.Value = uint(envValInt) - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.UintVar(f.Destination, name, f.Value, f.Usage) - return - } - set.Uint(name, f.Value, f.Usage) - }) - - return nil -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f Uint64Flag) Apply(set *flag.FlagSet) { - f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f Uint64Flag) ApplyWithError(set *flag.FlagSet) error { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal, ok := syscall.Getenv(envVar); ok { - envValInt, err := strconv.ParseUint(envVal, 0, 64) - if err != nil { - return fmt.Errorf("could not parse %s as uint64 value for flag %s: %s", envVal, f.Name, err) - } - - f.Value = uint64(envValInt) - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.Uint64Var(f.Destination, name, f.Value, f.Usage) - return - } - set.Uint64(name, f.Value, f.Usage) - }) - - return nil -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f DurationFlag) Apply(set *flag.FlagSet) { - f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f DurationFlag) ApplyWithError(set *flag.FlagSet) error { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal, ok := syscall.Getenv(envVar); ok { - envValDuration, err := time.ParseDuration(envVal) - if err != nil { - return fmt.Errorf("could not parse %s as duration for flag %s: %s", envVal, f.Name, err) - } - - f.Value = envValDuration - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.DurationVar(f.Destination, name, f.Value, f.Usage) - return - } - set.Duration(name, f.Value, f.Usage) - }) - - return nil -} - -// Apply populates the flag given the flag set and environment -// 
Ignores errors -func (f Float64Flag) Apply(set *flag.FlagSet) { - f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f Float64Flag) ApplyWithError(set *flag.FlagSet) error { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal, ok := syscall.Getenv(envVar); ok { - envValFloat, err := strconv.ParseFloat(envVal, 10) - if err != nil { - return fmt.Errorf("could not parse %s as float64 value for flag %s: %s", envVal, f.Name, err) - } - - f.Value = float64(envValFloat) - break - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.Float64Var(f.Destination, name, f.Value, f.Usage) - return - } - set.Float64(name, f.Value, f.Usage) - }) - - return nil -} - func visibleFlags(fl []Flag) []Flag { - visible := []Flag{} - for _, flag := range fl { - field := flagValue(flag).FieldByName("Hidden") + var visible []Flag + for _, f := range fl { + field := flagValue(f).FieldByName("Hidden") if !field.IsValid() || !field.Bool() { - visible = append(visible, flag) + visible = append(visible, f) } } return visible @@ -700,11 +196,19 @@ func withEnvHint(envVar, str string) string { suffix = "%" sep = "%, %" } - envText = fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(strings.Split(envVar, ","), sep), suffix) + envText = " [" + prefix + strings.Join(strings.Split(envVar, ","), sep) + suffix + "]" } return str + envText } +func withFileHint(filePath, str string) string { + fileText := "" + if filePath != "" { + fileText = fmt.Sprintf(" [%s]", filePath) + } + return str + fileText +} + func flagValue(f Flag) reflect.Value { fv := reflect.ValueOf(f) for fv.Kind() == reflect.Ptr { @@ -718,14 +222,29 @@ func stringifyFlag(f Flag) string { switch f.(type) { case IntSliceFlag: - return FlagEnvHinter(fv.FieldByName("EnvVar").String(), - stringifyIntSliceFlag(f.(IntSliceFlag))) + return FlagFileHinter( + fv.FieldByName("FilePath").String(), + FlagEnvHinter( + fv.FieldByName("EnvVar").String(), + stringifyIntSliceFlag(f.(IntSliceFlag)), + ), + ) case Int64SliceFlag: - return FlagEnvHinter(fv.FieldByName("EnvVar").String(), - stringifyInt64SliceFlag(f.(Int64SliceFlag))) + return FlagFileHinter( + fv.FieldByName("FilePath").String(), + FlagEnvHinter( + fv.FieldByName("EnvVar").String(), + stringifyInt64SliceFlag(f.(Int64SliceFlag)), + ), + ) case StringSliceFlag: - return FlagEnvHinter(fv.FieldByName("EnvVar").String(), - stringifyStringSliceFlag(f.(StringSliceFlag))) + return FlagFileHinter( + fv.FieldByName("FilePath").String(), + FlagEnvHinter( + fv.FieldByName("EnvVar").String(), + stringifyStringSliceFlag(f.(StringSliceFlag)), + ), + ) } placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String()) @@ -750,17 +269,22 @@ func stringifyFlag(f Flag) string { placeholder = defaultPlaceholder } - usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultValueString)) + usageWithDefault := strings.TrimSpace(usage + defaultValueString) - return FlagEnvHinter(fv.FieldByName("EnvVar").String(), - fmt.Sprintf("%s\t%s", FlagNamePrefixer(fv.FieldByName("Name").String(), placeholder), usageWithDefault)) + return FlagFileHinter( + fv.FieldByName("FilePath").String(), + FlagEnvHinter( + fv.FieldByName("EnvVar").String(), + FlagNamePrefixer(fv.FieldByName("Name").String(), placeholder)+"\t"+usageWithDefault, + ), + ) } func stringifyIntSliceFlag(f IntSliceFlag) string { - defaultVals := []string{} + var defaultVals []string if f.Value != nil && 
len(f.Value.Value()) > 0 { for _, i := range f.Value.Value() { - defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) + defaultVals = append(defaultVals, strconv.Itoa(i)) } } @@ -768,10 +292,10 @@ func stringifyIntSliceFlag(f IntSliceFlag) string { } func stringifyInt64SliceFlag(f Int64SliceFlag) string { - defaultVals := []string{} + var defaultVals []string if f.Value != nil && len(f.Value.Value()) > 0 { for _, i := range f.Value.Value() { - defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) + defaultVals = append(defaultVals, strconv.FormatInt(i, 10)) } } @@ -779,11 +303,11 @@ func stringifyInt64SliceFlag(f Int64SliceFlag) string { } func stringifyStringSliceFlag(f StringSliceFlag) string { - defaultVals := []string{} + var defaultVals []string if f.Value != nil && len(f.Value.Value()) > 0 { for _, s := range f.Value.Value() { if len(s) > 0 { - defaultVals = append(defaultVals, fmt.Sprintf("%q", s)) + defaultVals = append(defaultVals, strconv.Quote(s)) } } } @@ -802,6 +326,21 @@ func stringifySliceFlag(usage, name string, defaultVals []string) string { defaultVal = fmt.Sprintf(" (default: %s)", strings.Join(defaultVals, ", ")) } - usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal)) - return fmt.Sprintf("%s\t%s", FlagNamePrefixer(name, placeholder), usageWithDefault) + usageWithDefault := strings.TrimSpace(usage + defaultVal) + return FlagNamePrefixer(name, placeholder) + "\t" + usageWithDefault +} + +func flagFromFileEnv(filePath, envName string) (val string, ok bool) { + for _, envVar := range strings.Split(envName, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + return envVal, true + } + } + for _, fileVar := range strings.Split(filePath, ",") { + if data, err := ioutil.ReadFile(fileVar); err == nil { + return string(data), true + } + } + return "", false } diff --git a/vendor/github.com/urfave/cli/flag_bool.go b/vendor/github.com/urfave/cli/flag_bool.go new file mode 100644 index 000000000000..2499b0b524af --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_bool.go @@ -0,0 +1,109 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// BoolFlag is a flag with type bool +type BoolFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Destination *bool +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f BoolFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f BoolFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f BoolFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f BoolFlag) TakesValue() bool { + return false +} + +// GetUsage returns the usage string for the flag +func (f BoolFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
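// Illustrative usage sketch, not part of this patch: flagFromFileEnv
// (added above) resolves a flag's default from the environment first and
// then from a file, so values such as mounted secrets can back a flag.
// An explicit command-line value still overrides both. The path and env
// var below are hypothetical, and StringFlag is assumed to gain the same
// FilePath field as the flag types shown in this patch.
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "push"
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:     "token",
			Usage:    "API token",
			EnvVar:   "PUSH_TOKEN",
			FilePath: "/run/secrets/push-token", // read when the flag and PUSH_TOKEN are unset
		},
	}
	app.Action = func(c *cli.Context) error {
		fmt.Println("token length:", len(c.String("token")))
		return nil
	}
	_ = app.Run(os.Args)
}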
+func (f BoolFlag) GetValue() string { + return "" +} + +// Bool looks up the value of a local BoolFlag, returns +// false if not found +func (c *Context) Bool(name string) bool { + return lookupBool(name, c.flagSet) +} + +// GlobalBool looks up the value of a global BoolFlag, returns +// false if not found +func (c *Context) GlobalBool(name string) bool { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBool(name, fs) + } + return false +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f BoolFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f BoolFlag) ApplyWithError(set *flag.FlagSet) error { + val := false + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + if envVal == "" { + val = false + } else { + envValBool, err := strconv.ParseBool(envVal) + if err != nil { + return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err) + } + val = envValBool + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } + set.Bool(name, val, f.Usage) + }) + + return nil +} + +func lookupBool(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return parsed + } + return false +} diff --git a/vendor/github.com/urfave/cli/flag_bool_t.go b/vendor/github.com/urfave/cli/flag_bool_t.go new file mode 100644 index 000000000000..cd0888fa21ea --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_bool_t.go @@ -0,0 +1,110 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// BoolTFlag is a flag with type bool that is true by default +type BoolTFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Destination *bool +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f BoolTFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f BoolTFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f BoolTFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f BoolTFlag) TakesValue() bool { + return false +} + +// GetUsage returns the usage string for the flag +func (f BoolTFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f BoolTFlag) GetValue() string { + return "" +} + +// BoolT looks up the value of a local BoolTFlag, returns +// false if not found +func (c *Context) BoolT(name string) bool { + return lookupBoolT(name, c.flagSet) +} + +// GlobalBoolT looks up the value of a global BoolTFlag, returns +// false if not found +func (c *Context) GlobalBoolT(name string) bool { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBoolT(name, fs) + } + return false +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f BoolTFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f BoolTFlag) ApplyWithError(set *flag.FlagSet) error { + val := true + + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + if envVal == "" { + val = false + } else { + envValBool, err := strconv.ParseBool(envVal) + if err != nil { + return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err) + } + val = envValBool + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } + set.Bool(name, val, f.Usage) + }) + + return nil +} + +func lookupBoolT(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return parsed + } + return false +} diff --git a/vendor/github.com/urfave/cli/flag_duration.go b/vendor/github.com/urfave/cli/flag_duration.go new file mode 100644 index 000000000000..df4ade589d04 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_duration.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "time" +) + +// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration) +type DurationFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value time.Duration + Destination *time.Duration +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f DurationFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f DurationFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f DurationFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f DurationFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f DurationFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f DurationFlag) GetValue() string { + return f.Value.String() +} + +// Duration looks up the value of a local DurationFlag, returns +// 0 if not found +func (c *Context) Duration(name string) time.Duration { + return lookupDuration(name, c.flagSet) +} + +// GlobalDuration looks up the value of a global DurationFlag, returns +// 0 if not found +func (c *Context) GlobalDuration(name string) time.Duration { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupDuration(name, fs) + } + return 0 +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f DurationFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f DurationFlag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + envValDuration, err := time.ParseDuration(envVal) + if err != nil { + return fmt.Errorf("could not parse %s as duration for flag %s: %s", envVal, f.Name, err) + } + + f.Value = envValDuration + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.DurationVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Duration(name, f.Value, f.Usage) + }) + + return nil +} + +func lookupDuration(name string, set *flag.FlagSet) time.Duration { + f := set.Lookup(name) + if f != nil { + parsed, err := time.ParseDuration(f.Value.String()) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/flag_float64.go b/vendor/github.com/urfave/cli/flag_float64.go new file mode 100644 index 000000000000..65398d3b5c37 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_float64.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// Float64Flag is a flag with type float64 +type Float64Flag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value float64 + Destination *float64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Float64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Float64Flag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f Float64Flag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f Float64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f Float64Flag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f Float64Flag) GetValue() string { + return fmt.Sprintf("%f", f.Value) +} + +// Float64 looks up the value of a local Float64Flag, returns +// 0 if not found +func (c *Context) Float64(name string) float64 { + return lookupFloat64(name, c.flagSet) +} + +// GlobalFloat64 looks up the value of a global Float64Flag, returns +// 0 if not found +func (c *Context) GlobalFloat64(name string) float64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupFloat64(name, fs) + } + return 0 +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Float64Flag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Float64Flag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + envValFloat, err := strconv.ParseFloat(envVal, 10) + if err != nil { + return fmt.Errorf("could not parse %s as float64 value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = envValFloat + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Float64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Float64(name, f.Value, f.Usage) + }) + + return nil +} + +func lookupFloat64(name string, set *flag.FlagSet) float64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseFloat(f.Value.String(), 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/flag_generated.go b/vendor/github.com/urfave/cli/flag_generated.go deleted file mode 100644 index 491b61956c76..000000000000 --- a/vendor/github.com/urfave/cli/flag_generated.go +++ /dev/null @@ -1,627 +0,0 @@ -package cli - -import ( - "flag" - "strconv" - "time" -) - -// WARNING: This file is generated! 
- -// BoolFlag is a flag with type bool -type BoolFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Destination *bool -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f BoolFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f BoolFlag) GetName() string { - return f.Name -} - -// Bool looks up the value of a local BoolFlag, returns -// false if not found -func (c *Context) Bool(name string) bool { - return lookupBool(name, c.flagSet) -} - -// GlobalBool looks up the value of a global BoolFlag, returns -// false if not found -func (c *Context) GlobalBool(name string) bool { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupBool(name, fs) - } - return false -} - -func lookupBool(name string, set *flag.FlagSet) bool { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseBool(f.Value.String()) - if err != nil { - return false - } - return parsed - } - return false -} - -// BoolTFlag is a flag with type bool that is true by default -type BoolTFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Destination *bool -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f BoolTFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f BoolTFlag) GetName() string { - return f.Name -} - -// BoolT looks up the value of a local BoolTFlag, returns -// false if not found -func (c *Context) BoolT(name string) bool { - return lookupBoolT(name, c.flagSet) -} - -// GlobalBoolT looks up the value of a global BoolTFlag, returns -// false if not found -func (c *Context) GlobalBoolT(name string) bool { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupBoolT(name, fs) - } - return false -} - -func lookupBoolT(name string, set *flag.FlagSet) bool { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseBool(f.Value.String()) - if err != nil { - return false - } - return parsed - } - return false -} - -// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration) -type DurationFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value time.Duration - Destination *time.Duration -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f DurationFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f DurationFlag) GetName() string { - return f.Name -} - -// Duration looks up the value of a local DurationFlag, returns -// 0 if not found -func (c *Context) Duration(name string) time.Duration { - return lookupDuration(name, c.flagSet) -} - -// GlobalDuration looks up the value of a global DurationFlag, returns -// 0 if not found -func (c *Context) GlobalDuration(name string) time.Duration { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupDuration(name, fs) - } - return 0 -} - -func lookupDuration(name string, set *flag.FlagSet) time.Duration { - f := set.Lookup(name) - if f != nil { - parsed, err := time.ParseDuration(f.Value.String()) - if err != nil { - return 0 - } - return parsed - } - return 0 -} - -// Float64Flag is a flag with type float64 -type Float64Flag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value float64 - Destination *float64 -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f 
Float64Flag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f Float64Flag) GetName() string { - return f.Name -} - -// Float64 looks up the value of a local Float64Flag, returns -// 0 if not found -func (c *Context) Float64(name string) float64 { - return lookupFloat64(name, c.flagSet) -} - -// GlobalFloat64 looks up the value of a global Float64Flag, returns -// 0 if not found -func (c *Context) GlobalFloat64(name string) float64 { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupFloat64(name, fs) - } - return 0 -} - -func lookupFloat64(name string, set *flag.FlagSet) float64 { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseFloat(f.Value.String(), 64) - if err != nil { - return 0 - } - return parsed - } - return 0 -} - -// GenericFlag is a flag with type Generic -type GenericFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value Generic -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f GenericFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f GenericFlag) GetName() string { - return f.Name -} - -// Generic looks up the value of a local GenericFlag, returns -// nil if not found -func (c *Context) Generic(name string) interface{} { - return lookupGeneric(name, c.flagSet) -} - -// GlobalGeneric looks up the value of a global GenericFlag, returns -// nil if not found -func (c *Context) GlobalGeneric(name string) interface{} { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupGeneric(name, fs) - } - return nil -} - -func lookupGeneric(name string, set *flag.FlagSet) interface{} { - f := set.Lookup(name) - if f != nil { - parsed, err := f.Value, error(nil) - if err != nil { - return nil - } - return parsed - } - return nil -} - -// Int64Flag is a flag with type int64 -type Int64Flag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value int64 - Destination *int64 -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f Int64Flag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f Int64Flag) GetName() string { - return f.Name -} - -// Int64 looks up the value of a local Int64Flag, returns -// 0 if not found -func (c *Context) Int64(name string) int64 { - return lookupInt64(name, c.flagSet) -} - -// GlobalInt64 looks up the value of a global Int64Flag, returns -// 0 if not found -func (c *Context) GlobalInt64(name string) int64 { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupInt64(name, fs) - } - return 0 -} - -func lookupInt64(name string, set *flag.FlagSet) int64 { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) - if err != nil { - return 0 - } - return parsed - } - return 0 -} - -// IntFlag is a flag with type int -type IntFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value int - Destination *int -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f IntFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f IntFlag) GetName() string { - return f.Name -} - -// Int looks up the value of a local IntFlag, returns -// 0 if not found -func (c *Context) Int(name string) int { - return lookupInt(name, c.flagSet) -} - -// GlobalInt looks up the value of a global IntFlag, 
returns -// 0 if not found -func (c *Context) GlobalInt(name string) int { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupInt(name, fs) - } - return 0 -} - -func lookupInt(name string, set *flag.FlagSet) int { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) - if err != nil { - return 0 - } - return int(parsed) - } - return 0 -} - -// IntSliceFlag is a flag with type *IntSlice -type IntSliceFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value *IntSlice -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f IntSliceFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f IntSliceFlag) GetName() string { - return f.Name -} - -// IntSlice looks up the value of a local IntSliceFlag, returns -// nil if not found -func (c *Context) IntSlice(name string) []int { - return lookupIntSlice(name, c.flagSet) -} - -// GlobalIntSlice looks up the value of a global IntSliceFlag, returns -// nil if not found -func (c *Context) GlobalIntSlice(name string) []int { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupIntSlice(name, fs) - } - return nil -} - -func lookupIntSlice(name string, set *flag.FlagSet) []int { - f := set.Lookup(name) - if f != nil { - parsed, err := (f.Value.(*IntSlice)).Value(), error(nil) - if err != nil { - return nil - } - return parsed - } - return nil -} - -// Int64SliceFlag is a flag with type *Int64Slice -type Int64SliceFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value *Int64Slice -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f Int64SliceFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f Int64SliceFlag) GetName() string { - return f.Name -} - -// Int64Slice looks up the value of a local Int64SliceFlag, returns -// nil if not found -func (c *Context) Int64Slice(name string) []int64 { - return lookupInt64Slice(name, c.flagSet) -} - -// GlobalInt64Slice looks up the value of a global Int64SliceFlag, returns -// nil if not found -func (c *Context) GlobalInt64Slice(name string) []int64 { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupInt64Slice(name, fs) - } - return nil -} - -func lookupInt64Slice(name string, set *flag.FlagSet) []int64 { - f := set.Lookup(name) - if f != nil { - parsed, err := (f.Value.(*Int64Slice)).Value(), error(nil) - if err != nil { - return nil - } - return parsed - } - return nil -} - -// StringFlag is a flag with type string -type StringFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value string - Destination *string -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f StringFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f StringFlag) GetName() string { - return f.Name -} - -// String looks up the value of a local StringFlag, returns -// "" if not found -func (c *Context) String(name string) string { - return lookupString(name, c.flagSet) -} - -// GlobalString looks up the value of a global StringFlag, returns -// "" if not found -func (c *Context) GlobalString(name string) string { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupString(name, fs) - } - return "" -} - -func lookupString(name string, set *flag.FlagSet) string { - f := set.Lookup(name) - if f != 
nil { - parsed, err := f.Value.String(), error(nil) - if err != nil { - return "" - } - return parsed - } - return "" -} - -// StringSliceFlag is a flag with type *StringSlice -type StringSliceFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value *StringSlice -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f StringSliceFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f StringSliceFlag) GetName() string { - return f.Name -} - -// StringSlice looks up the value of a local StringSliceFlag, returns -// nil if not found -func (c *Context) StringSlice(name string) []string { - return lookupStringSlice(name, c.flagSet) -} - -// GlobalStringSlice looks up the value of a global StringSliceFlag, returns -// nil if not found -func (c *Context) GlobalStringSlice(name string) []string { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupStringSlice(name, fs) - } - return nil -} - -func lookupStringSlice(name string, set *flag.FlagSet) []string { - f := set.Lookup(name) - if f != nil { - parsed, err := (f.Value.(*StringSlice)).Value(), error(nil) - if err != nil { - return nil - } - return parsed - } - return nil -} - -// Uint64Flag is a flag with type uint64 -type Uint64Flag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value uint64 - Destination *uint64 -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f Uint64Flag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f Uint64Flag) GetName() string { - return f.Name -} - -// Uint64 looks up the value of a local Uint64Flag, returns -// 0 if not found -func (c *Context) Uint64(name string) uint64 { - return lookupUint64(name, c.flagSet) -} - -// GlobalUint64 looks up the value of a global Uint64Flag, returns -// 0 if not found -func (c *Context) GlobalUint64(name string) uint64 { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupUint64(name, fs) - } - return 0 -} - -func lookupUint64(name string, set *flag.FlagSet) uint64 { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) - if err != nil { - return 0 - } - return parsed - } - return 0 -} - -// UintFlag is a flag with type uint -type UintFlag struct { - Name string - Usage string - EnvVar string - Hidden bool - Value uint - Destination *uint -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f UintFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f UintFlag) GetName() string { - return f.Name -} - -// Uint looks up the value of a local UintFlag, returns -// 0 if not found -func (c *Context) Uint(name string) uint { - return lookupUint(name, c.flagSet) -} - -// GlobalUint looks up the value of a global UintFlag, returns -// 0 if not found -func (c *Context) GlobalUint(name string) uint { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupUint(name, fs) - } - return 0 -} - -func lookupUint(name string, set *flag.FlagSet) uint { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) - if err != nil { - return 0 - } - return uint(parsed) - } - return 0 -} diff --git a/vendor/github.com/urfave/cli/flag_generic.go b/vendor/github.com/urfave/cli/flag_generic.go new file mode 100644 index 000000000000..c43dae7d0b25 --- /dev/null +++ 
b/vendor/github.com/urfave/cli/flag_generic.go @@ -0,0 +1,110 @@ +package cli + +import ( + "flag" + "fmt" +) + +// Generic is a generic parseable type identified by a specific flag +type Generic interface { + Set(value string) error + String() string +} + +// GenericFlag is a flag with type Generic +type GenericFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + TakesFile bool + Value Generic +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f GenericFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f GenericFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f GenericFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f GenericFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f GenericFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f GenericFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag +// Ignores parsing errors +func (f GenericFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag +func (f GenericFlag) ApplyWithError(set *flag.FlagSet) error { + val := f.Value + if fileEnvVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + if err := val.Set(fileEnvVal); err != nil { + return fmt.Errorf("could not parse %s as value for flag %s: %s", fileEnvVal, f.Name, err) + } + } + + eachName(f.Name, func(name string) { + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// Generic looks up the value of a local GenericFlag, returns +// nil if not found +func (c *Context) Generic(name string) interface{} { + return lookupGeneric(name, c.flagSet) +} + +// GlobalGeneric looks up the value of a global GenericFlag, returns +// nil if not found +func (c *Context) GlobalGeneric(name string) interface{} { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupGeneric(name, fs) + } + return nil +} + +func lookupGeneric(name string, set *flag.FlagSet) interface{} { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value, error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} diff --git a/vendor/github.com/urfave/cli/flag_int.go b/vendor/github.com/urfave/cli/flag_int.go new file mode 100644 index 000000000000..bae32e2818e1 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_int.go @@ -0,0 +1,105 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// IntFlag is a flag with type int +type IntFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value int + Destination *int +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f IntFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f IntFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f IntFlag) 
IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f IntFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f IntFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f IntFlag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f IntFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f IntFlag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + envValInt, err := strconv.ParseInt(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) + } + f.Value = int(envValInt) + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.IntVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Int(name, f.Value, f.Usage) + }) + + return nil +} + +// Int looks up the value of a local IntFlag, returns +// 0 if not found +func (c *Context) Int(name string) int { + return lookupInt(name, c.flagSet) +} + +// GlobalInt looks up the value of a global IntFlag, returns +// 0 if not found +func (c *Context) GlobalInt(name string) int { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt(name, fs) + } + return 0 +} + +func lookupInt(name string, set *flag.FlagSet) int { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return int(parsed) + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/flag_int64.go b/vendor/github.com/urfave/cli/flag_int64.go new file mode 100644 index 000000000000..aaafbe9d6d59 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_int64.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// Int64Flag is a flag with type int64 +type Int64Flag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value int64 + Destination *int64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Int64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Int64Flag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f Int64Flag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f Int64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f Int64Flag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f Int64Flag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Int64Flag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Int64Flag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + envValInt, err := strconv.ParseInt(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = envValInt + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Int64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Int64(name, f.Value, f.Usage) + }) + + return nil +} + +// Int64 looks up the value of a local Int64Flag, returns +// 0 if not found +func (c *Context) Int64(name string) int64 { + return lookupInt64(name, c.flagSet) +} + +// GlobalInt64 looks up the value of a global Int64Flag, returns +// 0 if not found +func (c *Context) GlobalInt64(name string) int64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt64(name, fs) + } + return 0 +} + +func lookupInt64(name string, set *flag.FlagSet) int64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/flag_int64_slice.go b/vendor/github.com/urfave/cli/flag_int64_slice.go new file mode 100644 index 000000000000..ed2e983b62e0 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_int64_slice.go @@ -0,0 +1,141 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" + "strings" +) + +// Int64Slice is an opaque type for []int to satisfy flag.Value and flag.Getter +type Int64Slice []int64 + +// Set parses the value into an integer and appends it to the list of values +func (f *Int64Slice) Set(value string) error { + tmp, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + *f = append(*f, tmp) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *Int64Slice) String() string { + return fmt.Sprintf("%#v", *f) +} + +// Value returns the slice of ints set by this flag +func (f *Int64Slice) Value() []int64 { + return *f +} + +// Get returns the slice of ints set by this flag +func (f *Int64Slice) Get() interface{} { + return *f +} + +// Int64SliceFlag is a flag with type *Int64Slice +type Int64SliceFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value *Int64Slice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Int64SliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Int64SliceFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f Int64SliceFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f Int64SliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f Int64SliceFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f Int64SliceFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Int64SliceFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Int64SliceFlag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + newVal := &Int64Slice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as int64 slice value for flag %s: %s", envVal, f.Name, err) + } + } + if f.Value == nil { + f.Value = newVal + } else { + *f.Value = *newVal + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &Int64Slice{} + } + set.Var(f.Value, name, f.Usage) + }) + return nil +} + +// Int64Slice looks up the value of a local Int64SliceFlag, returns +// nil if not found +func (c *Context) Int64Slice(name string) []int64 { + return lookupInt64Slice(name, c.flagSet) +} + +// GlobalInt64Slice looks up the value of a global Int64SliceFlag, returns +// nil if not found +func (c *Context) GlobalInt64Slice(name string) []int64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt64Slice(name, fs) + } + return nil +} + +func lookupInt64Slice(name string, set *flag.FlagSet) []int64 { + f := set.Lookup(name) + if f != nil { + parsed, err := (f.Value.(*Int64Slice)).Value(), error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} diff --git a/vendor/github.com/urfave/cli/flag_int_slice.go b/vendor/github.com/urfave/cli/flag_int_slice.go new file mode 100644 index 000000000000..c38d010fd075 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_int_slice.go @@ -0,0 +1,142 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" + "strings" +) + +// IntSlice is an opaque type for []int to satisfy flag.Value and flag.Getter +type IntSlice []int + +// Set parses the value into an integer and appends it to the list of values +func (f *IntSlice) Set(value string) error { + tmp, err := strconv.Atoi(value) + if err != nil { + return err + } + *f = append(*f, tmp) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *IntSlice) String() string { + return fmt.Sprintf("%#v", *f) +} + +// Value returns the slice of ints set by this flag +func (f *IntSlice) Value() []int { + return *f +} + +// Get returns the slice of ints set by this flag +func (f *IntSlice) Get() interface{} { + return *f +} + +// IntSliceFlag is a flag with type *IntSlice +type IntSliceFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value *IntSlice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f IntSliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f IntSliceFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f IntSliceFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f IntSliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f IntSliceFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as 
string representation and an empty +// string if the flag takes no value at all. +func (f IntSliceFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f IntSliceFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f IntSliceFlag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + newVal := &IntSlice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as int slice value for flag %s: %s", envVal, f.Name, err) + } + } + if f.Value == nil { + f.Value = newVal + } else { + *f.Value = *newVal + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &IntSlice{} + } + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// IntSlice looks up the value of a local IntSliceFlag, returns +// nil if not found +func (c *Context) IntSlice(name string) []int { + return lookupIntSlice(name, c.flagSet) +} + +// GlobalIntSlice looks up the value of a global IntSliceFlag, returns +// nil if not found +func (c *Context) GlobalIntSlice(name string) []int { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupIntSlice(name, fs) + } + return nil +} + +func lookupIntSlice(name string, set *flag.FlagSet) []int { + f := set.Lookup(name) + if f != nil { + parsed, err := (f.Value.(*IntSlice)).Value(), error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} diff --git a/vendor/github.com/urfave/cli/flag_string.go b/vendor/github.com/urfave/cli/flag_string.go new file mode 100644 index 000000000000..9f29da40b938 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_string.go @@ -0,0 +1,98 @@ +package cli + +import "flag" + +// StringFlag is a flag with type string +type StringFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + TakesFile bool + Value string + Destination *string +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f StringFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f StringFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f StringFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f StringFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f StringFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f StringFlag) GetValue() string { + return f.Value +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f StringFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f StringFlag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + f.Value = envVal + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + return + } + set.String(name, f.Value, f.Usage) + }) + + return nil +} + +// String looks up the value of a local StringFlag, returns +// "" if not found +func (c *Context) String(name string) string { + return lookupString(name, c.flagSet) +} + +// GlobalString looks up the value of a global StringFlag, returns +// "" if not found +func (c *Context) GlobalString(name string) string { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupString(name, fs) + } + return "" +} + +func lookupString(name string, set *flag.FlagSet) string { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value.String(), error(nil) + if err != nil { + return "" + } + return parsed + } + return "" +} diff --git a/vendor/github.com/urfave/cli/flag_string_slice.go b/vendor/github.com/urfave/cli/flag_string_slice.go new file mode 100644 index 000000000000..e865b2ff0376 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_string_slice.go @@ -0,0 +1,138 @@ +package cli + +import ( + "flag" + "fmt" + "strings" +) + +// StringSlice is an opaque type for []string to satisfy flag.Value and flag.Getter +type StringSlice []string + +// Set appends the string value to the list of values +func (f *StringSlice) Set(value string) error { + *f = append(*f, value) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *StringSlice) String() string { + return fmt.Sprintf("%s", *f) +} + +// Value returns the slice of strings set by this flag +func (f *StringSlice) Value() []string { + return *f +} + +// Get returns the slice of strings set by this flag +func (f *StringSlice) Get() interface{} { + return *f +} + +// StringSliceFlag is a flag with type *StringSlice +type StringSliceFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + TakesFile bool + Value *StringSlice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f StringSliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f StringSliceFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f StringSliceFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f StringSliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f StringSliceFlag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f StringSliceFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f StringSliceFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f StringSliceFlag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + newVal := &StringSlice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as string value for flag %s: %s", envVal, f.Name, err) + } + } + if f.Value == nil { + f.Value = newVal + } else { + *f.Value = *newVal + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &StringSlice{} + } + set.Var(f.Value, name, f.Usage) + }) + + return nil +} + +// StringSlice looks up the value of a local StringSliceFlag, returns +// nil if not found +func (c *Context) StringSlice(name string) []string { + return lookupStringSlice(name, c.flagSet) +} + +// GlobalStringSlice looks up the value of a global StringSliceFlag, returns +// nil if not found +func (c *Context) GlobalStringSlice(name string) []string { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupStringSlice(name, fs) + } + return nil +} + +func lookupStringSlice(name string, set *flag.FlagSet) []string { + f := set.Lookup(name) + if f != nil { + parsed, err := (f.Value.(*StringSlice)).Value(), error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} diff --git a/vendor/github.com/urfave/cli/flag_uint.go b/vendor/github.com/urfave/cli/flag_uint.go new file mode 100644 index 000000000000..d6a04f408705 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_uint.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// UintFlag is a flag with type uint +type UintFlag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value uint + Destination *uint +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f UintFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f UintFlag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f UintFlag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f UintFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f UintFlag) GetUsage() string { + return f.Usage +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f UintFlag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f UintFlag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + envValInt, err := strconv.ParseUint(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as uint value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = uint(envValInt) + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.UintVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Uint(name, f.Value, f.Usage) + }) + + return nil +} + +// GetValue returns 
the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f UintFlag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// Uint looks up the value of a local UintFlag, returns +// 0 if not found +func (c *Context) Uint(name string) uint { + return lookupUint(name, c.flagSet) +} + +// GlobalUint looks up the value of a global UintFlag, returns +// 0 if not found +func (c *Context) GlobalUint(name string) uint { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupUint(name, fs) + } + return 0 +} + +func lookupUint(name string, set *flag.FlagSet) uint { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return uint(parsed) + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/flag_uint64.go b/vendor/github.com/urfave/cli/flag_uint64.go new file mode 100644 index 000000000000..ea6493a8be88 --- /dev/null +++ b/vendor/github.com/urfave/cli/flag_uint64.go @@ -0,0 +1,106 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// Uint64Flag is a flag with type uint64 +type Uint64Flag struct { + Name string + Usage string + EnvVar string + FilePath string + Required bool + Hidden bool + Value uint64 + Destination *uint64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Uint64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Uint64Flag) GetName() string { + return f.Name +} + +// IsRequired returns whether or not the flag is required +func (f Uint64Flag) IsRequired() bool { + return f.Required +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f Uint64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f Uint64Flag) GetUsage() string { + return f.Usage +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f Uint64Flag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Uint64Flag) Apply(set *flag.FlagSet) { + _ = f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Uint64Flag) ApplyWithError(set *flag.FlagSet) error { + if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { + envValInt, err := strconv.ParseUint(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as uint64 value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = envValInt + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Uint64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Uint64(name, f.Value, f.Usage) + }) + + return nil +} + +// Uint64 looks up the value of a local Uint64Flag, returns +// 0 if not found +func (c *Context) Uint64(name string) uint64 { + return lookupUint64(name, c.flagSet) +} + +// GlobalUint64 looks up the value of a global Uint64Flag, returns +// 0 if not found +func (c *Context) GlobalUint64(name string) uint64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupUint64(name, fs) + } + return 0 +} + +func lookupUint64(name string, set *flag.FlagSet) uint64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/funcs.go b/vendor/github.com/urfave/cli/funcs.go index b335dbf6d337..0036b1130aa0 100644 --- a/vendor/github.com/urfave/cli/funcs.go +++ b/vendor/github.com/urfave/cli/funcs.go @@ -39,3 +39,6 @@ type FlagNamePrefixFunc func(fullName, placeholder string) string // with the environment variable details. type FlagEnvHintFunc func(envVar, str string) string +// FlagFileHintFunc is used by the default FlagStringFunc to annotate flag help +// with the file path details. +type FlagFileHintFunc func(filePath, str string) string diff --git a/vendor/github.com/urfave/cli/go.mod b/vendor/github.com/urfave/cli/go.mod new file mode 100644 index 000000000000..7d04d20167ea --- /dev/null +++ b/vendor/github.com/urfave/cli/go.mod @@ -0,0 +1,9 @@ +module github.com/urfave/cli + +go 1.11 + +require ( + github.com/BurntSushi/toml v0.3.1 + github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/vendor/github.com/urfave/cli/help.go b/vendor/github.com/urfave/cli/help.go index ed084fc1d670..7a4ef6925951 100644 --- a/vendor/github.com/urfave/cli/help.go +++ b/vendor/github.com/urfave/cli/help.go @@ -7,78 +7,9 @@ import ( "strings" "text/tabwriter" "text/template" + "unicode/utf8" ) -// AppHelpTemplate is the text template for the Default help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. 
-var AppHelpTemplate = `NAME: - {{.Name}}{{if .Usage}} - {{.Usage}}{{end}} - -USAGE: - {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} - -VERSION: - {{.Version}}{{end}}{{end}}{{if .Description}} - -DESCRIPTION: - {{.Description}}{{end}}{{if len .Authors}} - -AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: - {{range $index, $author := .Authors}}{{if $index}} - {{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}} - -COMMANDS:{{range .VisibleCategories}}{{if .Name}} - - {{.Name}}:{{end}}{{range .VisibleCommands}} - {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}} - -GLOBAL OPTIONS: - {{range $index, $option := .VisibleFlags}}{{if $index}} - {{end}}{{$option}}{{end}}{{end}}{{if .Copyright}} - -COPYRIGHT: - {{.Copyright}}{{end}} -` - -// CommandHelpTemplate is the text template for the command help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. -var CommandHelpTemplate = `NAME: - {{.HelpName}} - {{.Usage}} - -USAGE: - {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}} - -CATEGORY: - {{.Category}}{{end}}{{if .Description}} - -DESCRIPTION: - {{.Description}}{{end}}{{if .VisibleFlags}} - -OPTIONS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}} -` - -// SubcommandHelpTemplate is the text template for the subcommand help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. -var SubcommandHelpTemplate = `NAME: - {{.HelpName}} - {{if .Description}}{{.Description}}{{else}}{{.Usage}}{{end}} - -USAGE: - {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}} - -COMMANDS:{{range .VisibleCategories}}{{if .Name}} - {{.Name}}:{{end}}{{range .VisibleCommands}} - {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}} -{{end}}{{if .VisibleFlags}} -OPTIONS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}} -` - var helpCommand = Command{ Name: "help", Aliases: []string{"h"}, @@ -90,7 +21,7 @@ var helpCommand = Command{ return ShowCommandHelp(c, args.First()) } - ShowAppHelp(c) + _ = ShowAppHelp(c) return nil }, } @@ -130,7 +61,7 @@ var VersionPrinter = printVersion // ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with exit code. 
func ShowAppHelpAndExit(c *Context, exitCode int) { - ShowAppHelp(c) + _ = ShowAppHelp(c) os.Exit(exitCode) } @@ -154,19 +85,94 @@ func ShowAppHelp(c *Context) (err error) { // DefaultAppComplete prints the list of subcommands as the default app completion method func DefaultAppComplete(c *Context) { - for _, command := range c.App.Commands { + DefaultCompleteWithFlags(nil)(c) +} + +func printCommandSuggestions(commands []Command, writer io.Writer) { + for _, command := range commands { if command.Hidden { continue } - for _, name := range command.Names() { - fmt.Fprintln(c.App.Writer, name) + if os.Getenv("_CLI_ZSH_AUTOCOMPLETE_HACK") == "1" { + for _, name := range command.Names() { + _, _ = fmt.Fprintf(writer, "%s:%s\n", name, command.Usage) + } + } else { + for _, name := range command.Names() { + _, _ = fmt.Fprintf(writer, "%s\n", name) + } + } + } +} + +func cliArgContains(flagName string) bool { + for _, name := range strings.Split(flagName, ",") { + name = strings.TrimSpace(name) + count := utf8.RuneCountInString(name) + if count > 2 { + count = 2 + } + flag := fmt.Sprintf("%s%s", strings.Repeat("-", count), name) + for _, a := range os.Args { + if a == flag { + return true + } + } + } + return false +} + +func printFlagSuggestions(lastArg string, flags []Flag, writer io.Writer) { + cur := strings.TrimPrefix(lastArg, "-") + cur = strings.TrimPrefix(cur, "-") + for _, flag := range flags { + if bflag, ok := flag.(BoolFlag); ok && bflag.Hidden { + continue + } + for _, name := range strings.Split(flag.GetName(), ",") { + name = strings.TrimSpace(name) + // this will get total count utf8 letters in flag name + count := utf8.RuneCountInString(name) + if count > 2 { + count = 2 // resuse this count to generate single - or -- in flag completion + } + // if flag name has more than one utf8 letter and last argument in cli has -- prefix then + // skip flag completion for short flags example -v or -x + if strings.HasPrefix(lastArg, "--") && count == 1 { + continue + } + // match if last argument matches this flag and it is not repeated + if strings.HasPrefix(name, cur) && cur != name && !cliArgContains(flag.GetName()) { + flagCompletion := fmt.Sprintf("%s%s", strings.Repeat("-", count), name) + _, _ = fmt.Fprintln(writer, flagCompletion) + } + } + } +} + +func DefaultCompleteWithFlags(cmd *Command) func(c *Context) { + return func(c *Context) { + if len(os.Args) > 2 { + lastArg := os.Args[len(os.Args)-2] + if strings.HasPrefix(lastArg, "-") { + printFlagSuggestions(lastArg, c.App.Flags, c.App.Writer) + if cmd != nil { + printFlagSuggestions(lastArg, cmd.Flags, c.App.Writer) + } + return + } + } + if cmd != nil { + printCommandSuggestions(cmd.Subcommands, c.App.Writer) + } else { + printCommandSuggestions(c.App.Commands, c.App.Writer) } } } // ShowCommandHelpAndExit - exits with code after showing help func ShowCommandHelpAndExit(c *Context, command string, code int) { - ShowCommandHelp(c, command) + _ = ShowCommandHelp(c, command) os.Exit(code) } @@ -208,7 +214,7 @@ func ShowVersion(c *Context) { } func printVersion(c *Context) { - fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) + _, _ = fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) } // ShowCompletions prints the lists of commands within a given context @@ -222,19 +228,22 @@ func ShowCompletions(c *Context) { // ShowCommandCompletions prints the custom completions for a given command func ShowCommandCompletions(ctx *Context, command string) { c := ctx.App.Command(command) - if c != nil && 
c.BashComplete != nil { - c.BashComplete(ctx) + if c != nil { + if c.BashComplete != nil { + c.BashComplete(ctx) + } else { + DefaultCompleteWithFlags(c)(ctx) + } } + } func printHelpCustom(out io.Writer, templ string, data interface{}, customFunc map[string]interface{}) { funcMap := template.FuncMap{ "join": strings.Join, } - if customFunc != nil { - for key, value := range customFunc { - funcMap[key] = value - } + for key, value := range customFunc { + funcMap[key] = value } w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0) @@ -244,11 +253,11 @@ func printHelpCustom(out io.Writer, templ string, data interface{}, customFunc m // If the writer is closed, t.Execute will fail, and there's nothing // we can do to recover. if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { - fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + _, _ = fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) } return } - w.Flush() + _ = w.Flush() } func printHelp(out io.Writer, templ string, data interface{}) { @@ -281,7 +290,7 @@ func checkHelp(c *Context) bool { func checkCommandHelp(c *Context, name string) bool { if c.Bool("h") || c.Bool("help") { - ShowCommandHelp(c, name) + _ = ShowCommandHelp(c, name) return true } @@ -290,7 +299,7 @@ func checkCommandHelp(c *Context, name string) bool { func checkSubcommandHelp(c *Context) bool { if c.Bool("h") || c.Bool("help") { - ShowSubcommandHelp(c) + _ = ShowSubcommandHelp(c) return true } diff --git a/vendor/github.com/urfave/cli/parse.go b/vendor/github.com/urfave/cli/parse.go new file mode 100644 index 000000000000..865accf1027f --- /dev/null +++ b/vendor/github.com/urfave/cli/parse.go @@ -0,0 +1,80 @@ +package cli + +import ( + "flag" + "strings" +) + +type iterativeParser interface { + newFlagSet() (*flag.FlagSet, error) + useShortOptionHandling() bool +} + +// To enable short-option handling (e.g., "-it" vs "-i -t") we have to +// iteratively catch parsing errors. This way we achieve LR parsing without +// transforming any arguments. Otherwise, there is no way we can discriminate +// combined short options from common arguments that should be left untouched. +func parseIter(ip iterativeParser, args []string) (*flag.FlagSet, error) { + for { + set, err := ip.newFlagSet() + if err != nil { + return nil, err + } + + err = set.Parse(args) + if !ip.useShortOptionHandling() || err == nil { + return set, err + } + + errStr := err.Error() + trimmed := strings.TrimPrefix(errStr, "flag provided but not defined: ") + if errStr == trimmed { + return nil, err + } + + // regenerate the initial args with the split short opts + newArgs := []string{} + for i, arg := range args { + if arg != trimmed { + newArgs = append(newArgs, arg) + continue + } + + shortOpts := splitShortOptions(set, trimmed) + if len(shortOpts) == 1 { + return nil, err + } + + // add each short option and all remaining arguments + newArgs = append(newArgs, shortOpts...) + newArgs = append(newArgs, args[i+1:]...) 
+ args = newArgs + } + } +} + +func splitShortOptions(set *flag.FlagSet, arg string) []string { + shortFlagsExist := func(s string) bool { + for _, c := range s[1:] { + if f := set.Lookup(string(c)); f == nil { + return false + } + } + return true + } + + if !isSplittable(arg) || !shortFlagsExist(arg) { + return []string{arg} + } + + separated := make([]string, 0, len(arg)-1) + for _, flagChar := range arg[1:] { + separated = append(separated, "-"+string(flagChar)) + } + + return separated +} + +func isSplittable(flagArg string) bool { + return strings.HasPrefix(flagArg, "-") && !strings.HasPrefix(flagArg, "--") && len(flagArg) > 2 +} diff --git a/vendor/github.com/urfave/cli/sort.go b/vendor/github.com/urfave/cli/sort.go new file mode 100644 index 000000000000..23d1c2f77207 --- /dev/null +++ b/vendor/github.com/urfave/cli/sort.go @@ -0,0 +1,29 @@ +package cli + +import "unicode" + +// lexicographicLess compares strings alphabetically considering case. +func lexicographicLess(i, j string) bool { + iRunes := []rune(i) + jRunes := []rune(j) + + lenShared := len(iRunes) + if lenShared > len(jRunes) { + lenShared = len(jRunes) + } + + for index := 0; index < lenShared; index++ { + ir := iRunes[index] + jr := jRunes[index] + + if lir, ljr := unicode.ToLower(ir), unicode.ToLower(jr); lir != ljr { + return lir < ljr + } + + if ir != jr { + return ir < jr + } + } + + return i < j +} diff --git a/vendor/github.com/urfave/cli/template.go b/vendor/github.com/urfave/cli/template.go new file mode 100644 index 000000000000..c631fb97dd1a --- /dev/null +++ b/vendor/github.com/urfave/cli/template.go @@ -0,0 +1,121 @@ +package cli + +// AppHelpTemplate is the text template for the Default help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var AppHelpTemplate = `NAME: + {{.Name}}{{if .Usage}} - {{.Usage}}{{end}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} + +VERSION: + {{.Version}}{{end}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description}}{{end}}{{if len .Authors}} + +AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: + {{range $index, $author := .Authors}}{{if $index}} + {{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}} + +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + + {{.Name}}:{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{end}}{{if .VisibleFlags}} + +GLOBAL OPTIONS: + {{range $index, $option := .VisibleFlags}}{{if $index}} + {{end}}{{$option}}{{end}}{{end}}{{if .Copyright}} + +COPYRIGHT: + {{.Copyright}}{{end}} +` + +// CommandHelpTemplate is the text template for the command help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. 
+var CommandHelpTemplate = `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}} + +CATEGORY: + {{.Category}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description}}{{end}}{{if .VisibleFlags}} + +OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +` + +// SubcommandHelpTemplate is the text template for the subcommand help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var SubcommandHelpTemplate = `NAME: + {{.HelpName}} - {{if .Description}}{{.Description}}{{else}}{{.Usage}}{{end}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}} + +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + + {{.Name}}:{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}} + +OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +` + +var MarkdownDocTemplate = `% {{ .App.Name }}(8) {{ .App.Description }} + +% {{ .App.Author }} + +# NAME + +{{ .App.Name }}{{ if .App.Usage }} - {{ .App.Usage }}{{ end }} + +# SYNOPSIS + +{{ .App.Name }} +{{ if .SynopsisArgs }} +` + "```" + ` +{{ range $v := .SynopsisArgs }}{{ $v }}{{ end }}` + "```" + ` +{{ end }}{{ if .App.UsageText }} +# DESCRIPTION + +{{ .App.UsageText }} +{{ end }} +**Usage**: + +` + "```" + ` +{{ .App.Name }} [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...] +` + "```" + ` +{{ if .GlobalArgs }} +# GLOBAL OPTIONS +{{ range $v := .GlobalArgs }} +{{ $v }}{{ end }} +{{ end }}{{ if .Commands }} +# COMMANDS +{{ range $v := .Commands }} +{{ $v }}{{ end }}{{ end }}` + +var FishCompletionTemplate = `# {{ .App.Name }} fish shell completion + +function __fish_{{ .App.Name }}_no_subcommand --description 'Test if there has been any subcommand yet' + for i in (commandline -opc) + if contains -- $i{{ range $v := .AllCommands }} {{ $v }}{{ end }} + return 1 + end + end + return 0 +end + +{{ range $v := .Completions }}{{ $v }} +{{ end }}` From 4e8a49948c0a825d2ca79745c16cc44df1984aff Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Wed, 11 Sep 2019 13:24:54 -0700 Subject: [PATCH 60/77] Encrypt the secret file in containerd/containerd. 
Signed-off-by: Lantao Liu --- script/release/deploy-cri | 2 +- script/release/gcp-secret.json.enc | Bin 2352 -> 2352 bytes 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/script/release/deploy-cri b/script/release/deploy-cri index fdfb23bf5de8..efdbea83c42f 100755 --- a/script/release/deploy-cri +++ b/script/release/deploy-cri @@ -28,7 +28,7 @@ export CLOUDSDK_CORE_DISABLE_PROMPTS=1 curl https://sdk.cloud.google.com | bash > /dev/null gcloud version -openssl aes-256-cbc -K $encrypted_0a6446eb3ae3_key -iv $encrypted_0a6446eb3ae3_iv -in "${ROOT}/script/release/gcp-secret.json.enc" -out gcp-secret.json -d +openssl aes-256-cbc -K $encrypted_5a565171e51f_key -iv $encrypted_5a565171e51f_iv -in "${ROOT}/script/release/gcp-secret.json.enc" -out gcp-secret.json -d gcloud auth activate-service-account --key-file gcp-secret.json --project=k8s-cri-containerd gsutil cp "${ROOT}/releases/cri/*.tar.gz" "${BUCKET}" diff --git a/script/release/gcp-secret.json.enc b/script/release/gcp-secret.json.enc index 53e623a5597fe513d4524107dd538da6d3e2d708..fb21f72e3d9b5c79b7c2b650ba72c9163f8882f3 100644 GIT binary patch literal 2352 zcmV-03D5TTM@>=l?Gj_2)bs}aY56meo8Q9VwDh@S%&EhB(Pss+W}|@vTjhqrwORK(*{G3EcpE_|^tSrH*r#YQvBjt`@=4 zWe*YVE%xwm>qrt9A8tu_AKiJnsa5&!l@#7$17_=R5u`YCm2S@2T8D6Z8LpYn+!`5P31Sj8wXkKoYfa zp&uD_Jp%oQ25v|**e0nHcX_AFX$MX+K|LH~_OETtQ^|OFXr@K&iZNL$Vu2wt;$a{U zPkV)q5DGD@)6pK`qMZ3?xI%_fpW)EItXEJ9T(O8!(J>yh!mGfsNz}9fQg#`V7hzy? z!Cj^dv?9nR8TPK@7Ws5xO&3$I)@dVe=_6gR#CA=ypPRakWEk%dfoYCMx0rc!B5PFA z!Us86x&7Dmd&CgH^?y+@oYb@Yy4l8KV)1x^amyNj=HLW1Y4Ty8;X3QtH_Gvoak)?M zGD=co^!V8$^@nt6@JsT2x_a72~=S5w*OP#*K0Rk59mZn9m9-Lu- zYPNq%_XCDwl(xpiq43;<&CL@(&Zh4nDG(BNNn1+&Nqa(==V4O$1$_tR0Kri>C;T57 z2`1X+{-t!fM@m2V9#REOpyny9{+UoQZzWL@@AwBlIkq4IAuhyfdLG~;X@g=4RncTE zF|-i1cv7PZz?uH2ck+wfa2eOpY57o*3XeTWxt|gTfEsq(;Dlxoybc3Q+r-$cxnd() zTx`_~fFS>gT-Gb$BcVo`V1nRQF1{Y8^O~AJH2nYw+V`Nmtk8Qx>SMcEUY~ zCd#ciKFQWx9Ro};DVZj2?Yl8$S)i^Y*o<1hepLTQKQ@A>XEfIq1)C^+MMK$_+jc81 zp7XGoQ3FU;Nt$gHXbXFpuY;PLWR_;HrWtgMe5Xv#j$jygrS)^4icFN))03DiXQ#Sm zE#fgb3rL}}MreYM@8g07Q>WT2yl9SIinQ_rfl!orHk_^YH%1=A_%>t$XMXfz$VD0s zgH(V71Fk)1U860qU|FMx7?_3dYb)%Zt9}w30AJv^XiEBmi?lxYuTT5S_eYQaSgc&! zVeKGo0Yq~C>Mk50i`(@7g%kc!DK%P12EhnfQObCZjWFpu8kp}gr zV*LwoNZNdtq*EUsT~dawEP{4ULZBa0Ais5|dCVmPaOf8tt}U>z%UqBEy-y@KSFCtf zJeX;wzcz?c1c;PM-RT!g{}=+(CvHted{oeek8$rpu^~-{x=#&2-)p5S35qgBD-9|x z!zSv!pY%5!SWVc`EDK0~SZ!WHJt{s~joy^-u>u7|31)M-%7rKh*7lDfa#xPqd+~nH z_dAaxMI;k8|Jq+?+=O%1bVLHLh{HnPy%^40U-(km$khemOl( z;}H7IC{Awp7{@aGPo510LU@^!-vcglNG&1)VM@p@wsx`@IvZ3o3&qyrmxfNYf(RY< zDI(?eQEC14U^{;j2=Y^%u_a|2*U<@=E!n?m%V%Wf>6A6D&g2di6JVC@w zy6)%j%S3Lao?+c+UoU-Ctc72nDp2_v_Gsm;eC^^>m~tr#LWQumhpxD&fs)`vcEf8% zRZEYL$1^_emV4Ei#+4ln-mGv~W{=O_)@}OZKbquSX$Cg^0UFkdie}E?b5e}(K%ahD zfW#VuPz@F)GH4I2LECFpicbeVT#&#l0IAndVdVd%twIHz<+C!C$&Ygft)GOTBMG&d zv!^-GPj(TdK8S57vr53B<+FiIxsX~^T6ur7Z=ANHwKv0%cCjR(x2LVTPNIQ3T+`4*!(2Dc2tP=K2y3Z8UE3xN<|VBPzgLf-v{2 zsoyNTb)rqx=Sox4GvVbm)3MN-q$1mW1&%E$(8p-99<7Ik7m;se=c2(6L9zg{Nn%3j z*{rF`0Z;)ivw->{eSPjuH^ezv^w8{`{x)Gfhrt%nIG7`YANdH0WqAK|BAT|%1bN`5 z)cz2Zb&j05>j{=WPjgvokh1_vYwe5}L*Zg214v0lWgE49u80_~py4{*^+0C{mT423 zyY)7|;p0RWR8_P-rX$@0ribh$>W=@Q-Q})O%K=wW!cE9< WUwQ!clGq?IK~2-T07jtME-+xX$b9$! 
literal 2352 zcmV-03D5R)lfxhfmvJR9g{qwzu7iTKPO=mT8-DI#Hf19LrYLfpq8c15;s>-~(|L&3 z^;->H-&CLp(58)XLwLfz6Y@DE`y8n^qq*^C)&{OeE{%d?y|OSn^fv-oVM2y8f(mIS zc^%u`r0&~~W%Nfh1}Gubd75?XcImRul;@N}TEh}Iblp2V9#&6IeWO)Xc|aTN2BHPe zDK`%wfZRM}m$xHO55kZ!d7CoCh*iac{_>LR?w% ze%K8@g0$+bac5 z1Jx$wQjZ8Vdn!fNon5SXZUy#;IuTKUy_DJWsL%~Gpv^RGGi7I?my=jzZzEe_X7gcu z^`CEA9Q#zdrAJihTdjXn?u)>^Ait_d*~0nmOV1L^lO z;1>4^0Yh(d+Cpm>G{zS6)AWt8uz8SjbX7_I{j+N;NTjq$o>{XeE><3+_b753b7f74 zCdn=9-G{xu%nZ7NSsGSFi6>rVQXq21IOx{fER%c`UMID#JXw zWi6YAxP*q8RZAJ``4@>AUwi>XX5HEV!kf`LvmoB374c3o8P z)<>dB&kl7%f(V5J%^}k9@3;u+o0Sf|KMN~9;~nwhvs%miu;*ec@iMPTnbX({Sv=XP zsS=P5^c8}N6gEcMMN67&KQc(n7)#&dUEfyOd3-g(4Pu_P`5HCL!Ejk9xsBgXJ9(Ec zqW2GBAEuRS7CJIiTb&veS!y}Ar^TdM>XpY?sHXYsSF{Ucw(fKZeZKicsHRrNs+uvY zH1r>(F&g>yE!-BfqP%BQ52>&;#6|ao$M26oJddNB4B?htj%IB5*_MA--Wgmz5SPvV zM=-Y4vDza~Yu&|s!Qmu7f9zFf(mUD{JFd+;t?e@WQp%~5TxH|&Se9|+G?v?_L(iBr z1k$3U(qSgM`^XJrj^WK#spH69-X-PUe+S@s&Y3k0e`nSXUuMtjtzLj3-A;NvyWSoZ zAP8D42_Zfwoeiqb9d40Kd@e_TP;cb)dj}Vr=+o!gfRPh@_6yvk?d1%crMSyAv>FgA zkMFvdjWCkf=DcReVhJ|LnC?FQq&O?KqgO~~n1n$_M8C1c$uvBK3PRuJk>hzOg@?-R zl90=pYu^@&TG`UAYB0bN!K9m0Ll+68|IkqU;Yrv?!DW7v{vCq{)T_uy%e3`aP>)PC zBi&rX)^H)C;MP!26gaqQ^d;qWkm?RqZq)X=O6ysLKVKA z57#z<{E!sF@7cw2&G zFR$#N8T@gQ6j8U^ovD0-6cEMEwmM|CH(e=D$6 z2)o2uu)SyI@HCFscSA}2N~~IxyG%o$f4+W)-dU=bjXqIHaIEX8EDzBp$vRI+&_7^Q zBP6TGd}Yp!HKZst?(h967!Sl>co?l~R?gC(!nLIC&vfhHJwIZ&N^08~q16s@RT8Ob z!7UJC2Zwm+NPggamF14B~75M9a>S}l$4-Fgx?cSS7{6 zS&(-efq-a#f@>`s59Sk|T5wghEl|!m`y{@Gfc4tx^K)D&^P3b@s#DT9$C!*TbIcAm zQbURXxdlwOhH6Qyu=Tx&q9II4>N|FF$87_=OqHY_K@r`!UJ0v@#14_8xIkjwyI`OO zM@@wQza_%xlQ!HHX_N5u)l^nGuD~_`%7-HmnnRGLU3)WiLsJ}D933iXa-5tU zD1}(#e7SXPrL#C)o7AV2`adk;Dl7%#A>Aui;&x{=NODw6g0TbK8d0db zEZ<7_S~<9q{RW@|gxOI)HfyHf3dz<&o-1R5TsTW%h@8mpani7VYB5PSx~kgA zYb&jnL53EL01O#Fpyq@+{PFG03JxQ7z!o z)3eA<7Xj6PaykTpSViMb+Gdc3|HXg~#q6+gzk}Tr7yo+psqngMSF3-VKmf`#b%|@p zJW-)L_rZ!v(^g({HOU0H7T?B%;~(_g@dEWbb8$8JKS&hZt7obpwcJCzATicUMsX2LJej*zXBYp*kikd$j#x zkS=#}*g@j77Yj&JBrBFg`fP8#?LoHE*&Te?wNaR=M!TOF^5Ue4LHent?WW)F zuLZ!3u5Z0wubU_1X|-TqMFWyZQ2~p=SDFim8-A81`k`6-lUUC<<_@>;sC#$EUD4M|K}qw z5AL!&YdgEJ#0o@Kw`}+H4|LsD@NE%6POP`PmF=BJj4ZT5SQVk6UjWS3b>AA(^(GZ^ WKso8)VH##R9451@a;n@$^QRx!B#0IO From 36c543a7267c0977cbaea667c51fae7dddf618d3 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 9 Sep 2019 15:08:19 -0700 Subject: [PATCH 61/77] Remove extra CI testing on older Ubuntu Testing the older LTS does not require testing the full matrix to test for compatibility. Signed-off-by: Derek McGowan --- .travis.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.travis.yml b/.travis.yml index 66c1dc889918..7110c100bf7a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,18 +21,10 @@ env: matrix: include: # Skip testing previous LTS (Xenial / Ubuntu 16.04 LTS) on pull requests - - if: type != pull_request - os: linux - dist: xenial - env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v1 TRAVIS_CGO_ENABLED=1 TRAVIS_DISTRO=xenial - if: type != pull_request os: linux dist: xenial env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runc.v2 TRAVIS_CGO_ENABLED=1 TRAVIS_DISTRO=xenial - - if: type != pull_request - os: linux - dist: xenial - env: TRAVIS_GOOS=linux TEST_RUNTIME=io.containerd.runtime.v1.linux TRAVIS_CGO_ENABLED=1 TRAVIS_DISTRO=xenial go_import_path: github.com/containerd/containerd From f997c30ec6a30cb128b2e0856389e5dd3be0dccb Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Wed, 11 Sep 2019 16:49:36 -0700 Subject: [PATCH 62/77] Skip cleanup in travis deploy and avoid gcs race condition. 
Signed-off-by: Lantao Liu --- .travis.yml | 1 + script/release/deploy-cri | 10 ++++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index b62d6bf3d56b..837ffecdd0a1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -131,6 +131,7 @@ deploy: tags: true - provider: script script: bash script/release/deploy-cri + skip_cleanup: true on: repo: containerd/containerd # TODO: switch `tags: true` after validating on master diff --git a/script/release/deploy-cri b/script/release/deploy-cri index efdbea83c42f..793b87547a40 100755 --- a/script/release/deploy-cri +++ b/script/release/deploy-cri @@ -31,5 +31,11 @@ gcloud version openssl aes-256-cbc -K $encrypted_5a565171e51f_key -iv $encrypted_5a565171e51f_iv -in "${ROOT}/script/release/gcp-secret.json.enc" -out gcp-secret.json -d gcloud auth activate-service-account --key-file gcp-secret.json --project=k8s-cri-containerd -gsutil cp "${ROOT}/releases/cri/*.tar.gz" "${BUCKET}" -gsutil cp "${ROOT}/releases/cri/*.tar.gz.sha256" "${BUCKET}" +for file in $(ls "${ROOT}"/releases/cri/*.tar.gz.sha256); do + output="$(gsutil cp -n "${file}" "${BUCKET}" 2>&1)" + if [[ "$output" =~ "Skipping existing item" ]];then + echo "$(basename ${file}) already exists, skip the release tarball" + continue + fi + gsutil cp "${file%.sha256}" "${BUCKET}" +done From 6ed809168e580b5081dc7eb35114dc5adbb62fed Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Thu, 12 Sep 2019 00:31:53 -0700 Subject: [PATCH 63/77] Publish cri release to gs://cri-containerd-release. Signed-off-by: Lantao Liu --- .travis.yml | 3 +-- script/release/deploy-cri | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 837ffecdd0a1..a703fb1bdf4b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -134,6 +134,5 @@ deploy: skip_cleanup: true on: repo: containerd/containerd - # TODO: switch `tags: true` after validating on master - branch: master + tags: true condition: $TRAVIS_GOOS = linux diff --git a/script/release/deploy-cri b/script/release/deploy-cri index 793b87547a40..7fefab4ec97b 100755 --- a/script/release/deploy-cri +++ b/script/release/deploy-cri @@ -20,8 +20,7 @@ set -eu -o pipefail ROOT=${GOPATH}/src/github.com/containerd/containerd -# TODO: Change cri-containerd-release after tested. -BUCKET="gs://cri-containerd-staging" +BUCKET="gs://cri-containerd-release" rm -rf "${HOME}/google-cloud-sdk" export CLOUDSDK_CORE_DISABLE_PROMPTS=1 From 5a656cacb4cda97a738b898c7437ba6189471526 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 12 Sep 2019 14:18:09 -0400 Subject: [PATCH 64/77] Move manpage gen to separate binary This moves the man page generation to a separate binary Signed-off-by: Michael Crosby --- Makefile | 7 +--- cmd/containerd-stress/main.go | 2 - cmd/containerd/main.go | 2 - cmd/ctr/main.go | 2 - cmd/gen-manpages/main.go | 57 ++++++++++++++++++++++++++ pkg/climan/cli.go | 75 ----------------------------------- 6 files changed, 59 insertions(+), 86 deletions(-) create mode 100644 cmd/gen-manpages/main.go delete mode 100644 pkg/climan/cli.go diff --git a/Makefile b/Makefile index 487f6954ebb0..1e5fc465f2cd 100644 --- a/Makefile +++ b/Makefile @@ -82,7 +82,6 @@ TEST_REQUIRES_ROOT_PACKAGES=$(filter \ # Project binaries. 
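
[Editor's aside, not part of the patch series] The loop added to script/release/deploy-cri above uploads each release tarball with `gsutil cp -n` and treats the "Skipping existing item" output as a sign that another deploy job already published that file, which is how the GCS race mentioned in the subject of PATCH 62/77 is avoided. The sketch below expresses the same "create only if absent" idea with the Go GCS client, purely as a hedged illustration: the bucket name and tarball path are hypothetical placeholders, the precondition-error handling is one plausible way to detect an existing object, and the actual release scripts use gsutil rather than this client.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"path/filepath"

	"cloud.google.com/go/storage"
	"google.golang.org/api/googleapi"
)

// uploadIfAbsent copies localPath into bucket only when no object with the
// same name exists yet, mirroring the `gsutil cp -n` behaviour in deploy-cri.
func uploadIfAbsent(ctx context.Context, client *storage.Client, bucket, localPath string) error {
	f, err := os.Open(localPath)
	if err != nil {
		return err
	}
	defer f.Close()

	name := filepath.Base(localPath)
	obj := client.Bucket(bucket).Object(name)

	// DoesNotExist turns the upload into an atomic "create if absent" on the
	// server side, so two concurrent deploy jobs cannot overwrite each other.
	w := obj.If(storage.Conditions{DoesNotExist: true}).NewWriter(ctx)
	if _, err := io.Copy(w, f); err != nil {
		w.Close()
		return err
	}
	if err := w.Close(); err != nil {
		var gerr *googleapi.Error
		if errors.As(err, &gerr) && gerr.Code == http.StatusPreconditionFailed {
			// Same outcome as the "Skipping existing item" branch in the script.
			fmt.Printf("%s already exists, skipping\n", name)
			return nil
		}
		return err
	}
	return nil
}

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // uses application default credentials
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Hypothetical bucket and tarball, for illustration only.
	if err := uploadIfAbsent(ctx, client, "example-release-bucket", "/tmp/example.tar.gz"); err != nil {
		log.Fatal(err)
	}
}
```

The server-side DoesNotExist precondition makes the create atomic, so there is no read-then-write window between concurrent deploy jobs; the shell version approximates this by letting `gsutil cp -n` decide and parsing its output.
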
COMMANDS=ctr containerd containerd-stress -MANBINARIES=ctr containerd containerd-stress MANPAGES=ctr.1 containerd.1 containerd-config.1 containerd-config.toml.5 ifdef BUILDTAGS @@ -204,10 +203,8 @@ man: mandir $(addprefix man/,$(MANPAGES)) mandir: @mkdir -p man -genman: $(addprefix genman/,$(MANBINARIES)) - -genman/%: bin/% FORCE - "$<" gen-man --format man man/ +genman: FORCE + go run cmd/gen-manpages/main.go man/ man/%: docs/man/%.md FORCE @echo "$(WHALE) $<" diff --git a/cmd/containerd-stress/main.go b/cmd/containerd-stress/main.go index b842df333a95..9ef058b907f5 100644 --- a/cmd/containerd-stress/main.go +++ b/cmd/containerd-stress/main.go @@ -30,7 +30,6 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/pkg/climan" "github.com/containerd/containerd/plugin" metrics "github.com/docker/go-metrics" "github.com/sirupsen/logrus" @@ -162,7 +161,6 @@ func main() { } app.Commands = []cli.Command{ densityCommand, - climan.Command, } app.Action = func(context *cli.Context) error { config := config{ diff --git a/cmd/containerd/main.go b/cmd/containerd/main.go index d258c9500f59..10bde45bd444 100644 --- a/cmd/containerd/main.go +++ b/cmd/containerd/main.go @@ -21,7 +21,6 @@ import ( "os" "github.com/containerd/containerd/cmd/containerd/command" - "github.com/containerd/containerd/pkg/climan" "github.com/containerd/containerd/pkg/seed" ) @@ -31,7 +30,6 @@ func init() { func main() { app := command.App() - app.Commands = append(app.Commands, climan.Command) if err := app.Run(os.Args); err != nil { fmt.Fprintf(os.Stderr, "containerd: %s\n", err) os.Exit(1) diff --git a/cmd/ctr/main.go b/cmd/ctr/main.go index 9db84d22de9c..cf72de28eb2f 100644 --- a/cmd/ctr/main.go +++ b/cmd/ctr/main.go @@ -21,7 +21,6 @@ import ( "os" "github.com/containerd/containerd/cmd/ctr/app" - "github.com/containerd/containerd/pkg/climan" "github.com/containerd/containerd/pkg/seed" "github.com/urfave/cli" ) @@ -35,7 +34,6 @@ func init() { func main() { app := app.New() app.Commands = append(app.Commands, pluginCmds...) - app.Commands = append(app.Commands, climan.Command) if err := app.Run(os.Args); err != nil { fmt.Fprintf(os.Stderr, "ctr: %s\n", err) os.Exit(1) diff --git a/cmd/gen-manpages/main.go b/cmd/gen-manpages/main.go new file mode 100644 index 000000000000..610dd3ddbc6c --- /dev/null +++ b/cmd/gen-manpages/main.go @@ -0,0 +1,57 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containerd/containerd/cmd/containerd/command" + "github.com/containerd/containerd/cmd/ctr/app" + "github.com/urfave/cli" +) + +func main() { + if err := run(); err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } +} + +func run() error { + flag.Parse() + apps := map[string]*cli.App{ + "containerd": command.App(), + "ctr": app.New(), + } + dir := flag.Arg(0) + for name, app := range apps { + // clear out the usage as we use banners that do not display in man pages + app.Usage = "" + data, err := app.ToMan() + if err != nil { + return err + } + if err := ioutil.WriteFile(filepath.Join(dir, fmt.Sprintf("%s.1", name)), []byte(data), 0644); err != nil { + return err + } + } + return nil +} diff --git a/pkg/climan/cli.go b/pkg/climan/cli.go deleted file mode 100644 index 4302b28a3abe..000000000000 --- a/pkg/climan/cli.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package climan - -import ( - "fmt" - "io/ioutil" - "path/filepath" - - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -var Command = cli.Command{ - Name: "gen-man", - Usage: "generate man pages for the cli application", - Hidden: true, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "format,f", - Usage: "specify the format in (md:man)", - Value: "md", - }, - cli.IntFlag{ - Name: "section,s", - Usage: "section of the man pages", - Value: 1, - }, - }, - Action: func(clix *cli.Context) (err error) { - // clear out the usage as we use banners that do not display in man pages - clix.App.Usage = "" - dir := clix.Args().First() - if dir == "" { - return errors.New("directory argument is required") - } - var ( - data string - ext string - ) - switch clix.String("format") { - case "man": - data, err = clix.App.ToMan() - default: - data, err = clix.App.ToMarkdown() - ext = "md" - } - if err != nil { - return err - } - return ioutil.WriteFile(filepath.Join(dir, formatFilename(clix, clix.Int("section"), ext)), []byte(data), 0644) - }, -} - -func formatFilename(clix *cli.Context, section int, ext string) string { - s := fmt.Sprintf("%s.%d", clix.App.Name, section) - if ext != "" { - s += "." + ext - } - return s -} From 5bb0281d1615cf84ad3ce7289590a360fd7020a3 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 12 Sep 2019 18:35:40 -0700 Subject: [PATCH 65/77] Fix missing vendor packages The switch to urfave/cli had a use of a /v2 API, which go modules handles correctly but vndr ignores. Downgrade urfave/cli for now until the switch to go modules. Add missing dependencies, which vndr now sees. Note that CI was not catching this issue, it seems that some part of the build process was pulling in dependencies even if they weren't in vendor, causing the build to work. However the vendor check was not seeing it. The ARM build didn't pull in other dependencies into the gopath, causing those builds to break. 
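
[Editor's aside, not part of the patch series] To make the vendoring problem described above concrete, here is a minimal self-contained sketch written against the v1 API that vendor.conf pins below; it is an illustration under that assumption, not code from this patch. The point is the import path: under semantic import versioning the v2 series of urfave/cli is imported as `github.com/urfave/cli/v2`, which go modules resolves from the module path, but vndr copies repositories into vendor/ by their literal path and so leaves nothing at that location, hence the temporary downgrade to v1.22.0.

```go
package main

import (
	"fmt"
	"os"

	// v1 import path, matching the github.com/urfave/cli v1.22.0 pin in
	// vendor.conf. The v2 series would be "github.com/urfave/cli/v2", which
	// only builds from vendor/ if the source is physically placed under
	// vendor/github.com/urfave/cli/v2; vndr does not do that.
	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "example"
	app.Usage = "minimal urfave/cli v1 program"
	app.Action = func(c *cli.Context) error {
		fmt.Println("hello from", c.App.Name)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```
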
Signed-off-by: Derek McGowan --- vendor.conf | 4 +- .../github.com/cpuguy83/go-md2man/LICENSE.md | 21 + .../github.com/cpuguy83/go-md2man/README.md | 21 + vendor/github.com/cpuguy83/go-md2man/go.mod | 5 + .../cpuguy83/go-md2man/md2man/md2man.go | 20 + .../cpuguy83/go-md2man/md2man/roff.go | 285 ++++ .../russross/blackfriday/LICENSE.txt | 29 + .../github.com/russross/blackfriday/README.md | 369 +++++ .../github.com/russross/blackfriday/block.go | 1474 +++++++++++++++++ vendor/github.com/russross/blackfriday/doc.go | 32 + vendor/github.com/russross/blackfriday/go.mod | 1 + .../github.com/russross/blackfriday/html.go | 938 +++++++++++ .../github.com/russross/blackfriday/inline.go | 1154 +++++++++++++ .../github.com/russross/blackfriday/latex.go | 334 ++++ .../russross/blackfriday/markdown.go | 941 +++++++++++ .../russross/blackfriday/smartypants.go | 430 +++++ vendor/github.com/urfave/cli/docs.go | 5 +- vendor/github.com/urfave/cli/fish.go | 4 - vendor/github.com/urfave/cli/flag.go | 464 ++++++ vendor/github.com/urfave/cli/flag_bool.go | 109 -- vendor/github.com/urfave/cli/flag_bool_t.go | 110 -- vendor/github.com/urfave/cli/flag_duration.go | 106 -- vendor/github.com/urfave/cli/flag_float64.go | 106 -- .../github.com/urfave/cli/flag_generated.go | 943 +++++++++++ vendor/github.com/urfave/cli/flag_generic.go | 110 -- vendor/github.com/urfave/cli/flag_int.go | 105 -- vendor/github.com/urfave/cli/flag_int64.go | 106 -- .../github.com/urfave/cli/flag_int64_slice.go | 141 -- .../github.com/urfave/cli/flag_int_slice.go | 142 -- vendor/github.com/urfave/cli/flag_string.go | 98 -- .../urfave/cli/flag_string_slice.go | 138 -- vendor/github.com/urfave/cli/flag_uint.go | 106 -- vendor/github.com/urfave/cli/flag_uint64.go | 106 -- vendor/github.com/urfave/cli/go.mod | 4 +- 34 files changed, 7467 insertions(+), 1494 deletions(-) create mode 100644 vendor/github.com/cpuguy83/go-md2man/LICENSE.md create mode 100644 vendor/github.com/cpuguy83/go-md2man/README.md create mode 100644 vendor/github.com/cpuguy83/go-md2man/go.mod create mode 100644 vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go create mode 100644 vendor/github.com/cpuguy83/go-md2man/md2man/roff.go create mode 100644 vendor/github.com/russross/blackfriday/LICENSE.txt create mode 100644 vendor/github.com/russross/blackfriday/README.md create mode 100644 vendor/github.com/russross/blackfriday/block.go create mode 100644 vendor/github.com/russross/blackfriday/doc.go create mode 100644 vendor/github.com/russross/blackfriday/go.mod create mode 100644 vendor/github.com/russross/blackfriday/html.go create mode 100644 vendor/github.com/russross/blackfriday/inline.go create mode 100644 vendor/github.com/russross/blackfriday/latex.go create mode 100644 vendor/github.com/russross/blackfriday/markdown.go create mode 100644 vendor/github.com/russross/blackfriday/smartypants.go delete mode 100644 vendor/github.com/urfave/cli/flag_bool.go delete mode 100644 vendor/github.com/urfave/cli/flag_bool_t.go delete mode 100644 vendor/github.com/urfave/cli/flag_duration.go delete mode 100644 vendor/github.com/urfave/cli/flag_float64.go create mode 100644 vendor/github.com/urfave/cli/flag_generated.go delete mode 100644 vendor/github.com/urfave/cli/flag_generic.go delete mode 100644 vendor/github.com/urfave/cli/flag_int.go delete mode 100644 vendor/github.com/urfave/cli/flag_int64.go delete mode 100644 vendor/github.com/urfave/cli/flag_int64_slice.go delete mode 100644 vendor/github.com/urfave/cli/flag_int_slice.go delete mode 100644 
vendor/github.com/urfave/cli/flag_string.go delete mode 100644 vendor/github.com/urfave/cli/flag_string_slice.go delete mode 100644 vendor/github.com/urfave/cli/flag_uint.go delete mode 100644 vendor/github.com/urfave/cli/flag_uint64.go diff --git a/vendor.conf b/vendor.conf index bc08afad3bb7..5f394e69c895 100644 --- a/vendor.conf +++ b/vendor.conf @@ -23,7 +23,7 @@ github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 github.com/opencontainers/runc f4982d86f7fde0b6f953cc62ccc4022c519a10a9 # v1.0.0-rc8-32-gf4982d86 github.com/konsorten/go-windows-terminal-sequences v1.0.1 github.com/sirupsen/logrus v1.4.1 -github.com/urfave/cli 388c2dd0f4ffaa8541e371d49c8413870a04d9fe # v1.22.0 + +github.com/urfave/cli v1.22.0 golang.org/x/net f3200d17e092c607f615320ecaad13d87ad9a2b3 google.golang.org/grpc 6eaf6f47437a6b4e2153a190160ef39a92c7eceb # v1.23.0 github.com/pkg/errors v0.8.1 @@ -47,6 +47,8 @@ github.com/hashicorp/go-multierror v1.0.0 github.com/hashicorp/golang-lru v0.5.3 go.opencensus.io v0.22.0 github.com/imdario/mergo v0.3.7 +github.com/cpuguy83/go-md2man v1.0.10 +github.com/russross/blackfriday v1.5.2 # cri dependencies github.com/containerd/cri f4d75d321c89b8d89bae570a7d2da1b3846c096f # release/1.3 diff --git a/vendor/github.com/cpuguy83/go-md2man/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/LICENSE.md new file mode 100644 index 000000000000..1cade6cef6a1 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/cpuguy83/go-md2man/README.md b/vendor/github.com/cpuguy83/go-md2man/README.md new file mode 100644 index 000000000000..29ed7c9e9f51 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/README.md @@ -0,0 +1,21 @@ +go-md2man +========= + +** Work in Progress ** +This still needs a lot of help to be complete, or even usable! + +Uses blackfriday to process markdown into man pages. + +### Usage + +./md2man -in /path/to/markdownfile.md -out /manfile/output/path + +### How to contribute + +We use [dep](https://github.com/golang/dep/) for vendoring Go packages. +See dep documentation for how to update. 
+ +### TODO + +- Needs oh so much testing love +- Look into blackfriday's 2.0 API diff --git a/vendor/github.com/cpuguy83/go-md2man/go.mod b/vendor/github.com/cpuguy83/go-md2man/go.mod new file mode 100644 index 000000000000..052bd20ea26a --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/go.mod @@ -0,0 +1,5 @@ +module github.com/cpuguy83/go-md2man + +go 1.12 + +require github.com/russross/blackfriday v1.5.2 diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go new file mode 100644 index 000000000000..af62279a6154 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go @@ -0,0 +1,20 @@ +package md2man + +import ( + "github.com/russross/blackfriday" +) + +// Render converts a markdown document into a roff formatted document. +func Render(doc []byte) []byte { + renderer := RoffRenderer(0) + extensions := 0 + extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS + extensions |= blackfriday.EXTENSION_TABLES + extensions |= blackfriday.EXTENSION_FENCED_CODE + extensions |= blackfriday.EXTENSION_AUTOLINK + extensions |= blackfriday.EXTENSION_SPACE_HEADERS + extensions |= blackfriday.EXTENSION_FOOTNOTES + extensions |= blackfriday.EXTENSION_TITLEBLOCK + + return blackfriday.Markdown(doc, renderer, extensions) +} diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go new file mode 100644 index 000000000000..8c29ec68738a --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go @@ -0,0 +1,285 @@ +package md2man + +import ( + "bytes" + "fmt" + "html" + "strings" + + "github.com/russross/blackfriday" +) + +type roffRenderer struct { + ListCounters []int +} + +// RoffRenderer creates a new blackfriday Renderer for generating roff documents +// from markdown +func RoffRenderer(flags int) blackfriday.Renderer { + return &roffRenderer{} +} + +func (r *roffRenderer) GetFlags() int { + return 0 +} + +func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) { + out.WriteString(".TH ") + + splitText := bytes.Split(text, []byte("\n")) + for i, line := range splitText { + line = bytes.TrimPrefix(line, []byte("% ")) + if i == 0 { + line = bytes.Replace(line, []byte("("), []byte("\" \""), 1) + line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1) + } + line = append([]byte("\""), line...) + line = append(line, []byte("\" ")...) 
+ out.Write(line) + } + out.WriteString("\n") + + // disable hyphenation + out.WriteString(".nh\n") + // disable justification (adjust text to left margin only) + out.WriteString(".ad l\n") +} + +func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) { + out.WriteString("\n.PP\n.RS\n\n.nf\n") + escapeSpecialChars(out, text) + out.WriteString("\n.fi\n.RE\n") +} + +func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) { + out.WriteString("\n.PP\n.RS\n") + out.Write(text) + out.WriteString("\n.RE\n") +} + +func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) { // nolint: golint + out.Write(text) +} + +func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) { + marker := out.Len() + + switch { + case marker == 0: + // This is the doc header + out.WriteString(".TH ") + case level == 1: + out.WriteString("\n\n.SH ") + case level == 2: + out.WriteString("\n.SH ") + default: + out.WriteString("\n.SS ") + } + + if !text() { + out.Truncate(marker) + return + } +} + +func (r *roffRenderer) HRule(out *bytes.Buffer) { + out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n") +} + +func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) { + marker := out.Len() + r.ListCounters = append(r.ListCounters, 1) + out.WriteString("\n.RS\n") + if !text() { + out.Truncate(marker) + return + } + r.ListCounters = r.ListCounters[:len(r.ListCounters)-1] + out.WriteString("\n.RE\n") +} + +func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) { + if flags&blackfriday.LIST_TYPE_ORDERED != 0 { + out.WriteString(fmt.Sprintf(".IP \"%3d.\" 5\n", r.ListCounters[len(r.ListCounters)-1])) + r.ListCounters[len(r.ListCounters)-1]++ + } else { + out.WriteString(".IP \\(bu 2\n") + } + out.Write(text) + out.WriteString("\n") +} + +func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) { + marker := out.Len() + out.WriteString("\n.PP\n") + if !text() { + out.Truncate(marker) + return + } + if marker != 0 { + out.WriteString("\n") + } +} + +func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) { + out.WriteString("\n.TS\nallbox;\n") + + maxDelims := 0 + lines := strings.Split(strings.TrimRight(string(header), "\n")+"\n"+strings.TrimRight(string(body), "\n"), "\n") + for _, w := range lines { + curDelims := strings.Count(w, "\t") + if curDelims > maxDelims { + maxDelims = curDelims + } + } + out.Write([]byte(strings.Repeat("l ", maxDelims+1) + "\n")) + out.Write([]byte(strings.Repeat("l ", maxDelims+1) + ".\n")) + out.Write(header) + if len(header) > 0 { + out.Write([]byte("\n")) + } + + out.Write(body) + out.WriteString("\n.TE\n") +} + +func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) { + if out.Len() > 0 { + out.WriteString("\n") + } + out.Write(text) +} + +func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) { + if out.Len() > 0 { + out.WriteString("\t") + } + if len(text) == 0 { + text = []byte{' '} + } + out.Write([]byte("\\fB\\fC" + string(text) + "\\fR")) +} + +func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) { + if out.Len() > 0 { + out.WriteString("\t") + } + if len(text) > 30 { + text = append([]byte("T{\n"), text...) + text = append(text, []byte("\nT}")...) 
+ } + if len(text) == 0 { + text = []byte{' '} + } + out.Write(text) +} + +func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) { + +} + +func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) { + +} + +func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) { + out.WriteString("\n\\[la]") + out.Write(link) + out.WriteString("\\[ra]") +} + +func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) { + out.WriteString("\\fB\\fC") + escapeSpecialChars(out, text) + out.WriteString("\\fR") +} + +func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) { + out.WriteString("\\fB") + out.Write(text) + out.WriteString("\\fP") +} + +func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) { + out.WriteString("\\fI") + out.Write(text) + out.WriteString("\\fP") +} + +func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) { +} + +func (r *roffRenderer) LineBreak(out *bytes.Buffer) { + out.WriteString("\n.br\n") +} + +func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { + out.Write(content) + r.AutoLink(out, link, 0) +} + +func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) { // nolint: golint + out.Write(tag) +} + +func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) { + out.WriteString("\\s+2") + out.Write(text) + out.WriteString("\\s-2") +} + +func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) { +} + +func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) { + +} + +func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) { + out.WriteString(html.UnescapeString(string(entity))) +} + +func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) { + escapeSpecialChars(out, text) +} + +func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) { +} + +func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) { +} + +func needsBackslash(c byte) bool { + for _, r := range []byte("-_&\\~") { + if c == r { + return true + } + } + return false +} + +func escapeSpecialChars(out *bytes.Buffer, text []byte) { + for i := 0; i < len(text); i++ { + // escape initial apostrophe or period + if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { + out.WriteString("\\&") + } + + // directly copy normal characters + org := i + + for i < len(text) && !needsBackslash(text[i]) { + i++ + } + if i > org { + out.Write(text[org:i]) + } + + // escape a character + if i >= len(text) { + break + } + out.WriteByte('\\') + out.WriteByte(text[i]) + } +} diff --git a/vendor/github.com/russross/blackfriday/LICENSE.txt b/vendor/github.com/russross/blackfriday/LICENSE.txt new file mode 100644 index 000000000000..2885af3602d8 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/LICENSE.txt @@ -0,0 +1,29 @@ +Blackfriday is distributed under the Simplified BSD License: + +> Copyright © 2011 Russ Ross +> All rights reserved. +> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions +> are met: +> +> 1. Redistributions of source code must retain the above copyright +> notice, this list of conditions and the following disclaimer. +> +> 2. Redistributions in binary form must reproduce the above +> copyright notice, this list of conditions and the following +> disclaimer in the documentation and/or other materials provided with +> the distribution. 
+> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/README.md b/vendor/github.com/russross/blackfriday/README.md new file mode 100644 index 000000000000..3c62e1375330 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/README.md @@ -0,0 +1,369 @@ +Blackfriday +[![Build Status][BuildSVG]][BuildURL] +[![Godoc][GodocV2SVG]][GodocV2URL] +=========== + +Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It +is paranoid about its input (so you can safely feed it user-supplied +data), it is fast, it supports common extensions (tables, smart +punctuation substitutions, etc.), and it is safe for all utf-8 +(unicode) input. + +HTML output is currently supported, along with Smartypants +extensions. + +It started as a translation from C of [Sundown][3]. + + +Installation +------------ + +Blackfriday is compatible with any modern Go release. With Go and git installed: + + go get -u gopkg.in/russross/blackfriday.v2 + +will download, compile, and install the package into your `$GOPATH` directory +hierarchy. + + +Versions +-------- + +Currently maintained and recommended version of Blackfriday is `v2`. It's being +developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the +documentation is available at +https://godoc.org/gopkg.in/russross/blackfriday.v2. + +It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`, +but we highly recommend using package management tool like [dep][7] or +[Glide][8] and make use of semantic versioning. With package management you +should import `github.com/russross/blackfriday` and specify that you're using +version 2.0.0. + +Version 2 offers a number of improvements over v1: + +* Cleaned up API +* A separate call to [`Parse`][4], which produces an abstract syntax tree for + the document +* Latest bug fixes +* Flexibility to easily add your own rendering extensions + +Potential drawbacks: + +* Our benchmarks show v2 to be slightly slower than v1. Currently in the + ballpark of around 15%. +* API breakage. If you can't afford modifying your code to adhere to the new API + and don't care too much about the new features, v2 is probably not for you. +* Several bug fixes are trailing behind and still need to be forward-ported to + v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for + tracking. + +If you are still interested in the legacy `v1`, you can import it from +`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found +here: https://godoc.org/github.com/russross/blackfriday + +### Known issue with `dep` + +There is a known problem with using Blackfriday v1 _transitively_ and `dep`. 
+Currently `dep` prioritizes semver versions over anything else, and picks the +latest one, plus it does not apply a `[[constraint]]` specifier to transitively +pulled in packages. So if you're using something that uses Blackfriday v1, but +that something does not use `dep` yet, you will get Blackfriday v2 pulled in and +your first dependency will fail to build. + +There are couple of fixes for it, documented here: +https://github.com/golang/dep/blob/master/docs/FAQ.md#how-do-i-constrain-a-transitive-dependencys-version + +Meanwhile, `dep` team is working on a more general solution to the constraints +on transitive dependencies problem: https://github.com/golang/dep/issues/1124. + + +Usage +----- + +### v1 + +For basic usage, it is as simple as getting your input into a byte +slice and calling: + + output := blackfriday.MarkdownBasic(input) + +This renders it with no extensions enabled. To get a more useful +feature set, use this instead: + + output := blackfriday.MarkdownCommon(input) + +### v2 + +For the most sensible markdown processing, it is as simple as getting your input +into a byte slice and calling: + +```go +output := blackfriday.Run(input) +``` + +Your input will be parsed and the output rendered with a set of most popular +extensions enabled. If you want the most basic feature set, corresponding with +the bare Markdown specification, use: + +```go +output := blackfriday.Run(input, blackfriday.WithNoExtensions()) +``` + +### Sanitize untrusted content + +Blackfriday itself does nothing to protect against malicious content. If you are +dealing with user-supplied markdown, we recommend running Blackfriday's output +through HTML sanitizer such as [Bluemonday][5]. + +Here's an example of simple usage of Blackfriday together with Bluemonday: + +```go +import ( + "github.com/microcosm-cc/bluemonday" + "gopkg.in/russross/blackfriday.v2" +) + +// ... +unsafe := blackfriday.Run(input) +html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) +``` + +### Custom options, v1 + +If you want to customize the set of options, first get a renderer +(currently only the HTML output engine), then use it to +call the more general `Markdown` function. For examples, see the +implementations of `MarkdownBasic` and `MarkdownCommon` in +`markdown.go`. + +### Custom options, v2 + +If you want to customize the set of options, use `blackfriday.WithExtensions`, +`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. + +### `blackfriday-tool` + +You can also check out `blackfriday-tool` for a more complete example +of how to use it. Download and install it using: + + go get github.com/russross/blackfriday-tool + +This is a simple command-line tool that allows you to process a +markdown file using a standalone program. You can also browse the +source directly on github if you are just looking for some example +code: + +* + +Note that if you have not already done so, installing +`blackfriday-tool` will be sufficient to download and install +blackfriday in addition to the tool itself. The tool binary will be +installed in `$GOPATH/bin`. This is a statically-linked binary that +can be copied to wherever you need it without worrying about +dependencies and library versions. + +### Sanitized anchor names + +Blackfriday includes an algorithm for creating sanitized anchor names +corresponding to a given input text. This algorithm is used to create +anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. 
The +algorithm has a specification, so that other packages can create +compatible anchor names and links to those anchors. + +The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names. + +[`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to +create compatible links to the anchor names generated by blackfriday. +This algorithm is also implemented in a small standalone package at +[`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients +that want a small package and don't need full functionality of blackfriday. + + +Features +-------- + +All features of Sundown are supported, including: + +* **Compatibility**. The Markdown v1.0.3 test suite passes with + the `--tidy` option. Without `--tidy`, the differences are + mostly in whitespace and entity escaping, where blackfriday is + more consistent and cleaner. + +* **Common extensions**, including table support, fenced code + blocks, autolinks, strikethroughs, non-strict emphasis, etc. + +* **Safety**. Blackfriday is paranoid when parsing, making it safe + to feed untrusted user input without fear of bad things + happening. The test suite stress tests this and there are no + known inputs that make it crash. If you find one, please let me + know and send me the input that does it. + + NOTE: "safety" in this context means *runtime safety only*. In order to + protect yourself against JavaScript injection in untrusted content, see + [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). + +* **Fast processing**. It is fast enough to render on-demand in + most web applications without having to cache the output. + +* **Thread safety**. You can run multiple parsers in different + goroutines without ill effect. There is no dependence on global + shared state. + +* **Minimal dependencies**. Blackfriday only depends on standard + library packages in Go. The source code is pretty + self-contained, so it is easy to add to any project, including + Google App Engine projects. + +* **Standards compliant**. Output successfully validates using the + W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. + + +Extensions +---------- + +In addition to the standard markdown syntax, this package +implements the following extensions: + +* **Intra-word emphasis supression**. The `_` character is + commonly used inside words when discussing code, so having + markdown interpret it as an emphasis command is usually the + wrong thing. Blackfriday lets you treat all emphasis markers as + normal characters when they occur inside a word. + +* **Tables**. Tables can be created by drawing them in the input + using a simple syntax: + + ``` + Name | Age + --------|------ + Bob | 27 + Alice | 23 + ``` + +* **Fenced code blocks**. In addition to the normal 4-space + indentation to mark code blocks, you can explicitly mark them + and supply a language (to make syntax highlighting simple). Just + mark it like this: + + ``` go + func getTrue() bool { + return true + } + ``` + + You can use 3 or more backticks to mark the beginning of the + block, and the same number to mark the end of the block. 
+ + To preserve classes of fenced code blocks while using the bluemonday + HTML sanitizer, use the following policy: + + ``` go + p := bluemonday.UGCPolicy() + p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") + html := p.SanitizeBytes(unsafe) + ``` + +* **Definition lists**. A simple definition list is made of a single-line + term followed by a colon and the definition for that term. + + Cat + : Fluffy animal everyone likes + + Internet + : Vector of transmission for pictures of cats + + Terms must be separated from the previous definition by a blank line. + +* **Footnotes**. A marker in the text that will become a superscript number; + a footnote definition that will be placed in a list of footnotes at the + end of the document. A footnote looks like this: + + This is a footnote.[^1] + + [^1]: the footnote text. + +* **Autolinking**. Blackfriday can find URLs that have not been + explicitly marked as links and turn them into links. + +* **Strikethrough**. Use two tildes (`~~`) to mark text that + should be crossed out. + +* **Hard line breaks**. With this extension enabled (it is off by + default in the `MarkdownBasic` and `MarkdownCommon` convenience + functions), newlines in the input translate into line breaks in + the output. + +* **Smart quotes**. Smartypants-style punctuation substitution is + supported, turning normal double- and single-quote marks into + curly quotes, etc. + +* **LaTeX-style dash parsing** is an additional option, where `--` + is translated into `–`, and `---` is translated into + `—`. This differs from most smartypants processors, which + turn a single hyphen into an ndash and a double hyphen into an + mdash. + +* **Smart fractions**, where anything that looks like a fraction + is translated into suitable HTML (instead of just a few special + cases like most smartypant processors). For example, `4/5` + becomes `45`, which renders as + 45. + + +Other renderers +--------------- + +Blackfriday is structured to allow alternative rendering engines. Here +are a few of note: + +* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown): + provides a GitHub Flavored Markdown renderer with fenced code block + highlighting, clickable heading anchor links. + + It's not customizable, and its goal is to produce HTML output + equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), + except the rendering is performed locally. + +* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, + but for markdown. + +* [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex): + renders output as LaTeX. + +* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience + integration with the [Chroma](https://github.com/alecthomas/chroma) code + highlighting library. bfchroma is only compatible with v2 of Blackfriday and + provides a drop-in renderer ready to use with Blackfriday, as well as + options and means for further customization. + + +TODO +---- + +* More unit testing +* Improve Unicode support. It does not understand all Unicode + rules (about what constitutes a letter, a punctuation symbol, + etc.), so it may fail to detect word boundaries correctly in + some instances. It is safe on all UTF-8 input. 
+ + +License +------- + +[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) + + + [1]: https://daringfireball.net/projects/markdown/ "Markdown" + [2]: https://golang.org/ "Go Language" + [3]: https://github.com/vmg/sundown "Sundown" + [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" + [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" + [6]: https://labix.org/gopkg.in "gopkg.in" + [7]: https://github.com/golang/dep/ "dep" + [8]: https://github.com/Masterminds/glide "Glide" + + [BuildSVG]: https://travis-ci.org/russross/blackfriday.svg?branch=master + [BuildURL]: https://travis-ci.org/russross/blackfriday + [GodocV2SVG]: https://godoc.org/gopkg.in/russross/blackfriday.v2?status.svg + [GodocV2URL]: https://godoc.org/gopkg.in/russross/blackfriday.v2 diff --git a/vendor/github.com/russross/blackfriday/block.go b/vendor/github.com/russross/blackfriday/block.go new file mode 100644 index 000000000000..45c21a6c267d --- /dev/null +++ b/vendor/github.com/russross/blackfriday/block.go @@ -0,0 +1,1474 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse block-level elements. +// + +package blackfriday + +import ( + "bytes" + "strings" + "unicode" +) + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. +func (p *parser) block(out *bytes.Buffer, data []byte) { + if len(data) == 0 || data[len(data)-1] != '\n' { + panic("block input is missing terminating newline") + } + + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // prefixed header: + // + // # Header 1 + // ## Header 2 + // ... + // ###### Header 6 + if p.isPrefixHeader(data) { + data = data[p.prefixHeader(out, data):] + continue + } + + // block of preformatted HTML: + // + //
+ // ... + //
+ if data[0] == '<' { + if i := p.html(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.flags&EXTENSION_TITLEBLOCK != 0 { + if data[0] == '%' { + if i := p.titleBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(out, data):] + continue + } + + // fenced code block: + // + // ``` go info string here + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCodeBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.r.HRule(out) + var i int + for i = 0; data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(out, data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.flags&EXTENSION_TABLES != 0 { + if i := p.table(out, data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(out, data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. 
Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_ORDERED):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_DEFINITION):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headers, too + data = data[p.paragraph(out, data):] + } + + p.nesting-- +} + +func (p *parser) isPrefixHeader(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.flags&EXTENSION_SPACE_HEADERS != 0 { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + if data[level] != ' ' { + return false + } + } + return true +} + +func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.flags&EXTENSION_HEADER_IDS != 0 { + j, k := 0, 0 + // find start/end of header id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract header id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + } + if end > i { + if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = SanitizedAnchorName(string(data[i:end])) + } + work := func() bool { + p.inline(out, data[i:end]) + return true + } + p.r.Header(out, work, level, id) + } + return skip +} + +func (p *parser) isUnderlinedHeader(data []byte) int { + // test of level 1 header + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 1 + } else { + return 0 + } + } + + // test of level 2 header + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 2 + } else { + return 0 + } + } + + return 0 +} + +func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + p.r.TitleBlock(out, data) + + return len(data) +} + +func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(out, data, doRender); size > 0 { + return size + } + + // check for an
tag + if size := p.htmlHr(out, data, doRender); size > 0 { + return size + } + + // check for HTML CDATA + if size := p.htmlCDATA(out, data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := i + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + + return i +} + +func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int { + // html block needs to end with a blank line + if i := p.isEmpty(data[start:]); i > 0 { + size := start + i + if doRender { + // trim trailing newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + return size + } + return 0 +} + +// HTML comment, lax form +func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int { + i := p.inlineHTMLComment(out, data) + return p.renderHTMLBlock(out, data, i, doRender) +} + +// HTML CDATA section +func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int { + const cdataTag = "') { + i++ + } + i++ + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return p.renderHTMLBlock(out, data, i, doRender) +} + +// HR, which is the only self-closing block tag considered +func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an
tag after all; at least not a valid one + return 0 + } + + i := 3 + for data[i] != '>' && data[i] != '\n' { + i++ + } + + if data[i] == '>' { + return p.renderHTMLBlock(out, data, i+1, doRender) + } + + return 0 +} + +func (p *parser) htmlFindTag(data []byte) (string, bool) { + i := 0 + for isalnum(data[i]) { + i++ + } + key := string(data[:i]) + if _, ok := blockTags[key]; ok { + return key, true + } + return "", false +} + +func (p *parser) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + + // check if tag is a match + closetag := []byte("") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (*parser) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + return i + 1 +} + +func (*parser) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for data[i] != '\n' { + switch { + case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, +// and returns the end index if so, or 0 otherwise. It also returns the marker found. +// If syntax is not nil, it gets set to the syntax specified in the fence line. +// A final newline is mandatory to recognize the fence line, unless newlineOptional is true. +func isFenceLine(data []byte, info *string, oldmarker string, newlineOptional bool) (end int, marker string) { + i, size := 0, 0 + + // skip up to three spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + + // check for the marker characters: ~ or ` + if i >= len(data) { + return 0, "" + } + if data[i] != '~' && data[i] != '`' { + return 0, "" + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < len(data) && data[i] == c { + size++ + i++ + } + + // the marker char must occur at least 3 times + if size < 3 { + return 0, "" + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return 0, "" + } + + // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here + // into one, always get the info string, and discard it if the caller doesn't care. 
+ if info != nil { + infoLength := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + if newlineOptional && i == len(data) { + return i, marker + } + return 0, "" + } + + infoStart := i + + if data[i] == '{' { + i++ + infoStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + infoLength++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for infoLength > 0 && isspace(data[infoStart]) { + infoStart++ + infoLength-- + } + + for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { + infoLength-- + } + + i++ + } else { + for i < len(data) && !isverticalspace(data[i]) { + infoLength++ + i++ + } + } + + *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) + } + + i = skipChar(data, i, ' ') + if i >= len(data) || data[i] != '\n' { + if newlineOptional && i == len(data) { + return i, marker + } + return 0, "" + } + + return i + 1, marker // Take newline into account. +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int { + var infoString string + beg, marker := isFenceLine(data, &infoString, "", false) + if beg == 0 || beg >= len(data) { + return 0 + } + + var work bytes.Buffer + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + newlineOptional := !doRender + fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? 
+ if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + p.r.BlockCode(out, work.Bytes(), infoString) + } + + return beg +} + +func (p *parser) table(out *bytes.Buffer, data []byte) int { + var header bytes.Buffer + i, columns := p.tableHeader(&header, data) + if i == 0 { + return 0 + } + + var body bytes.Buffer + + for i < len(data) { + pipes, rowStart := 0, i + for ; data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + i++ + p.tableRow(&body, data[rowStart:i], columns, false) + } + + p.r.Table(out, header.Bytes(), body.Bytes(), columns) + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) { + i := 0 + colCount := 1 + for i = 0; data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + header := data[:i+1] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]int, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_LEFT + dashes++ + } + for data[i] == '-' { + i++ + dashes++ + } + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_RIGHT + dashes++ + } + for data[i] == ' ' { + i++ + } + + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.tableRow(out, header, columns, true) + size = i + 1 + return +} + +func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) { + i, col := 0, 0 + var rowWork bytes.Buffer + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for data[i] == ' ' { + i++ + } + + cellStart := i + + for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && data[cellEnd-1] == ' ' { + cellEnd-- + } + + var cellWork bytes.Buffer + p.inline(&cellWork, data[cellStart:cellEnd]) + + if header { + p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col]) + } else { + p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col]) + } + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + if header { + p.r.TableHeaderCell(&rowWork, nil, columns[col]) + } else { + p.r.TableCell(&rowWork, nil, columns[col]) + } + } + + // silently ignore rows with too many cells + + p.r.TableRow(out, rowWork.Bytes()) +} + +// returns blockquote prefix length +func (p *parser) quotePrefix(data []byte) int { + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + if data[i] == '>' { + if data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *parser) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *parser) quote(out *bytes.Buffer, data []byte) int { + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // irregardless of any contents inside it + for data[end] != '\n' { + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCodeBlock(out, data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + end++ + + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateBlockquote(data, beg, end) { + break + } + + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + + var cooked bytes.Buffer + p.block(&cooked, raw.Bytes()) + p.r.BlockQuote(out, cooked.Bytes()) + return end +} + +// returns prefix length for block code +func (p *parser) codePrefix(data []byte) int { + if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { + return 4 + } + return 0 +} + +func (p *parser) code(out *bytes.Buffer, data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + for data[i] != '\n' { + i++ + } + i++ + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffeu + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := work.Bytes() + eol := len(workbytes) + for eol > 0 && workbytes[eol-1] == '\n' { + eol-- + } + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + p.r.BlockCode(out, work.Bytes(), "") + + return i +} + +// returns unordered list item prefix +func (p *parser) uliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // need a *, +, or - followed by a space + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + data[i+1] != ' ' { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *parser) oliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // count the digits + start := i + for data[i] >= '0' && data[i] <= '9' { + i++ + } + + // we need >= 1 digits followed by a dot and a space + if start == i || data[i] != '.' || data[i+1] != ' ' { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *parser) dliPrefix(data []byte) int { + i := 0 + + // need a : followed by a spaces + if data[i] != ':' || data[i+1] != ' ' { + return 0 + } + for data[i] == ' ' { + i++ + } + return i + 2 +} + +// parse ordered or unordered list block +func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int { + i := 0 + flags |= LIST_ITEM_BEGINNING_OF_LIST + work := func() bool { + for i < len(data) { + skip := p.listItem(out, data[i:], &flags) + i += skip + + if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 { + break + } + flags &= ^LIST_ITEM_BEGINNING_OF_LIST + } + return true + } + + p.r.List(out, work, flags) + return i +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. 
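+// It returns the number of bytes consumed and reports list state back to the
+// caller through flags such as LIST_ITEM_CONTAINS_BLOCK and LIST_ITEM_END_OF_LIST.
+// For example, given:
+//
+//   * item one
+//   * item two
+//
+// list() above calls listItem once per item, advancing by the returned count.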
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { + // keep track of the indentation of the first line + itemIndent := 0 + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^LIST_TYPE_TERM + } + } + if i == 0 { + // if in defnition list, set term flag and continue + if *flags&LIST_TYPE_DEFINITION != 0 { + *flags |= LIST_TYPE_TERM + } else { + return 0 + } + } + + // skip leading whitespace on first line + for data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + codeBlockMarker := "" + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + raw.Write(data[line:i]) + line = i + continue + } + + // calculate the indentation + indent := 0 + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + } + + chunk := data[line+indent : i] + + if p.flags&EXTENSION_FENCED_CODE != 0 { + // determine if in or out of codeblock + // if in codeblock, ignore normal list processing + _, marker := isFenceLine(chunk, nil, codeBlockMarker, false) + if marker != "" { + if codeBlockMarker == "" { + // start of codeblock + codeBlockMarker = marker + } else { + // end of codeblock. + *flags |= LIST_ITEM_CONTAINS_BLOCK + codeBlockMarker = "" + } + } + // we are in a codeblock, write line, and continue + if codeBlockMarker != "" || marker != "" { + raw.Write(data[line+indent : i]) + line = i + continue gatherlines + } + } + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + if containsBlankLine { + // end the list if the type changed after a blank line + if indent <= itemIndent && + ((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) || + (*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) { + + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + // to be a nested list, it must be indented more + // if not, it is the next item in the same list + if indent <= itemIndent { + break gatherlines + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix header? + case p.isPrefixHeader(chunk): + // if the header is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 { + // is the next item still a part of this list? 
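+ // peek ahead: if neither the next line nor the first line after the blank
+ // run starts with ':', no further definition follows and the list ends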
+ next := i + for data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= LIST_ITEM_END_OF_LIST + } + } else { + *flags |= LIST_ITEM_END_OF_LIST + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + containsBlankLine = false + + // add the line into the working buffer without prefix + raw.Write(data[line+indent : i]) + + line = i + } + + // If reached end of data, the Renderer.ListItem call we're going to make below + // is definitely the last in the list. + if line >= len(data) { + *flags |= LIST_ITEM_END_OF_LIST + } + + rawBytes := raw.Bytes() + + // render the contents of the list item + var cooked bytes.Buffer + if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.block(&cooked, rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + p.inline(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.inline(&cooked, rawBytes) + } + } + + // render the actual list item + cookedBytes := cooked.Bytes() + parsedEnd := len(cookedBytes) + + // strip trailing newlines + for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' { + parsedEnd-- + } + p.r.ListItem(out, cookedBytes[:parsedEnd], *flags) + + return line +} + +// render a single paragraph that has already been parsed out +func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + // trim trailing newline + end := len(data) - 1 + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + work := func() bool { + p.inline(out, data[beg:end]) + return true + } + p.r.Paragraph(out, work) +} + +func (p *parser) paragraph(out *bytes.Buffer, data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // did this blank line followed by a definition list item? 
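+ // if so, the preceding line is the definition term: reparse from that line
+ // on as a definition list rather than rendering a plain paragraph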
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + p.renderParagraph(out, data[:i]) + return i + n + } + + // an underline under some text marks a header, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeader(current); level > 0 { + // render the paragraph + p.renderParagraph(out, data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + // render the header + // this ugly double closure avoids forcing variables onto the heap + work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool { + return func() bool { + pp.inline(o, d) + return true + } + }(out, p, data[prev:eol]) + + id := "" + if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = SanitizedAnchorName(string(data[prev:eol])) + } + + p.r.Header(out, work, level, id) + + // find the end of the underline + for data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + if data[i] == '<' && p.html(out, current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a prefixed header or a horizontal rule after this, paragraph is over + if p.isPrefixHeader(current) || p.isHRule(current) { + p.renderParagraph(out, data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.flags&EXTENSION_FENCED_CODE != 0 { + if p.fencedCodeBlock(out, current, false) > 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(current) != 0 { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + // if there's a list after this, paragraph is over + if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + for data[i] != '\n' { + i++ + } + i++ + } + + p.renderParagraph(out, data[:i]) + return i +} + +// SanitizedAnchorName returns a sanitized anchor name for the given text. +// +// It implements the algorithm specified in the package comment. +func SanitizedAnchorName(text string) string { + var anchorName []rune + futureDash := false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/vendor/github.com/russross/blackfriday/doc.go b/vendor/github.com/russross/blackfriday/doc.go new file mode 100644 index 000000000000..9656c42a1916 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/doc.go @@ -0,0 +1,32 @@ +// Package blackfriday is a Markdown processor. +// +// It translates plain text with simple formatting rules into HTML or LaTeX. +// +// Sanitized Anchor Names +// +// Blackfriday includes an algorithm for creating sanitized anchor names +// corresponding to a given input text. 
This algorithm is used to create +// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The +// algorithm is specified below, so that other packages can create +// compatible anchor names and links to those anchors. +// +// The algorithm iterates over the input text, interpreted as UTF-8, +// one Unicode code point (rune) at a time. All runes that are letters (category L) +// or numbers (category N) are considered valid characters. They are mapped to +// lower case, and included in the output. All other runes are considered +// invalid characters. Invalid characters that preceed the first valid character, +// as well as invalid character that follow the last valid character +// are dropped completely. All other sequences of invalid characters +// between two valid characters are replaced with a single dash character '-'. +// +// SanitizedAnchorName exposes this functionality, and can be used to +// create compatible links to the anchor names generated by blackfriday. +// This algorithm is also implemented in a small standalone package at +// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients +// that want a small package and don't need full functionality of blackfriday. +package blackfriday + +// NOTE: Keep Sanitized Anchor Name algorithm in sync with package +// github.com/shurcooL/sanitized_anchor_name. +// Otherwise, users of sanitized_anchor_name will get anchor names +// that are incompatible with those generated by blackfriday. diff --git a/vendor/github.com/russross/blackfriday/go.mod b/vendor/github.com/russross/blackfriday/go.mod new file mode 100644 index 000000000000..b05561a066df --- /dev/null +++ b/vendor/github.com/russross/blackfriday/go.mod @@ -0,0 +1 @@ +module github.com/russross/blackfriday diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go new file mode 100644 index 000000000000..e0a6c69c96d7 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/html.go @@ -0,0 +1,938 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// HTML rendering backend +// +// + +package blackfriday + +import ( + "bytes" + "fmt" + "regexp" + "strconv" + "strings" +) + +// Html renderer configuration options. +const ( + HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks + HTML_SKIP_STYLE // skip embedded