From 7ea6fbf90ee05248c327101014885c00a5f0168c Mon Sep 17 00:00:00 2001 From: Steffen Siering Date: Fri, 12 Jun 2020 14:38:48 +0200 Subject: [PATCH] Init package libbeat/statestore (#19117) Initialize support for the statestore package. The addition of the statestore package is split up into multiple changeset to ease review. The final version of the package can be found [here](https://github.com/urso/beats/tree/fb-input-v2-combined/libbeat/statestore). Once finalized, the libbeat/statestore package contains: - The statestore frontend and interface for use within Beats - Interfaces for the store backend - A common set of tests store backends need to support - a storetest package for testing new features that require a store. The testing helpers use map[string]interface{} that can be initialized or queried after the test run for validation purposes. - The default memlog backend + tests This change includes the frontend and backend interfaces only. Once merged we will add the tests and finally the memlog store. --- NOTICE.txt | 48 +- go.mod | 5 +- go.sum | 10 + libbeat/statestore/backend/backend.go | 71 ++ libbeat/statestore/error.go | 91 +++ libbeat/statestore/registry.go | 87 +++ libbeat/statestore/store.go | 176 +++++ .../github.com/elastic/go-concert/.gitignore | 1 + .../github.com/elastic/go-concert/.travis.yml | 22 + .../elastic/go-concert/CHANGELOG.md | 32 + vendor/github.com/elastic/go-concert/LICENSE | 201 ++++++ .../elastic/go-concert/atomic/atomic.go | 94 +++ .../elastic/go-concert/atomic/atomic32.go | 50 ++ .../elastic/go-concert/atomic/atomic64.go | 50 ++ .../elastic/go-concert/chorus/closeref.go | 143 ++++ vendor/github.com/elastic/go-concert/go.mod | 12 + vendor/github.com/elastic/go-concert/go.sum | 54 ++ .../github.com/elastic/go-concert/nocopy.go | 24 + .../github.com/elastic/go-concert/refcount.go | 101 +++ .../github.com/elastic/go-concert/signal.go | 115 ++++ .../elastic/go-concert/unison/lockmanager.go | 436 ++++++++++++ .../go-concert/unison/multierrgroup.go | 58 ++ .../elastic/go-concert/unison/mutex.go | 134 ++++ .../go-concert/unison/safewaitgroup.go | 77 +++ .../elastic/go-concert/unison/taskgroup.go | 142 ++++ .../elastic/go-concert/unison/txlock.go | 192 ++++++ vendor/golang.org/x/lint/go.mod | 2 +- vendor/golang.org/x/lint/go.sum | 10 +- vendor/golang.org/x/mod/LICENSE | 27 + vendor/golang.org/x/mod/PATENTS | 22 + .../{tools/internal => mod}/module/module.go | 390 ++++++++--- .../{tools/internal => mod}/semver/semver.go | 4 +- .../x/tools/cmd/stringer/stringer.go | 7 +- vendor/golang.org/x/tools/go/analysis/doc.go | 35 +- .../x/tools/go/ast/astutil/imports.go | 5 +- .../x/tools/go/ast/inspector/inspector.go | 4 +- .../go/internal/gcimporter/gcimporter.go | 8 +- .../golang.org/x/tools/go/packages/golist.go | 650 +++++++----------- .../x/tools/go/packages/golist_overlay.go | 201 ++++-- .../x/tools/go/packages/packages.go | 36 +- .../x/tools/internal/fastwalk/fastwalk.go | 10 +- .../internal/fastwalk/fastwalk_portable.go | 2 +- .../tools/internal/fastwalk/fastwalk_unix.go | 2 +- .../x/tools/internal/gopathwalk/walk.go | 6 +- .../x/tools/internal/imports/fix.go | 589 ++++++++-------- .../x/tools/internal/imports/imports.go | 21 +- .../x/tools/internal/imports/mod.go | 280 +++++--- .../x/tools/internal/imports/mod_cache.go | 95 ++- .../internal/packagesinternal/packages.go | 4 + vendor/modules.txt | 15 +- 50 files changed, 3778 insertions(+), 1073 deletions(-) create mode 100644 libbeat/statestore/backend/backend.go create mode 100644 libbeat/statestore/error.go 
create mode 100644 libbeat/statestore/registry.go create mode 100644 libbeat/statestore/store.go create mode 100644 vendor/github.com/elastic/go-concert/.gitignore create mode 100644 vendor/github.com/elastic/go-concert/.travis.yml create mode 100644 vendor/github.com/elastic/go-concert/CHANGELOG.md create mode 100644 vendor/github.com/elastic/go-concert/LICENSE create mode 100644 vendor/github.com/elastic/go-concert/atomic/atomic.go create mode 100644 vendor/github.com/elastic/go-concert/atomic/atomic32.go create mode 100644 vendor/github.com/elastic/go-concert/atomic/atomic64.go create mode 100644 vendor/github.com/elastic/go-concert/chorus/closeref.go create mode 100644 vendor/github.com/elastic/go-concert/go.mod create mode 100644 vendor/github.com/elastic/go-concert/go.sum create mode 100644 vendor/github.com/elastic/go-concert/nocopy.go create mode 100644 vendor/github.com/elastic/go-concert/refcount.go create mode 100644 vendor/github.com/elastic/go-concert/signal.go create mode 100644 vendor/github.com/elastic/go-concert/unison/lockmanager.go create mode 100644 vendor/github.com/elastic/go-concert/unison/multierrgroup.go create mode 100644 vendor/github.com/elastic/go-concert/unison/mutex.go create mode 100644 vendor/github.com/elastic/go-concert/unison/safewaitgroup.go create mode 100644 vendor/github.com/elastic/go-concert/unison/taskgroup.go create mode 100644 vendor/github.com/elastic/go-concert/unison/txlock.go create mode 100644 vendor/golang.org/x/mod/LICENSE create mode 100644 vendor/golang.org/x/mod/PATENTS rename vendor/golang.org/x/{tools/internal => mod}/module/module.go (58%) rename vendor/golang.org/x/{tools/internal => mod}/semver/semver.go (99%) create mode 100644 vendor/golang.org/x/tools/internal/packagesinternal/packages.go diff --git a/NOTICE.txt b/NOTICE.txt index 659dbec83eb..811df0e8f3c 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1911,6 +1911,15 @@ SOFTWARE 6.11 "Subscription" means the right to receive Support Services and a License to the Commercial Software. +-------------------------------------------------------------------- +Dependency: github.com/elastic/go-concert +Version: v0.0.2 +License type (autodetected): Apache-2.0 +./vendor/github.com/elastic/go-concert/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + -------------------------------------------------------------------- Dependency: github.com/elastic/go-libaudit/v2 Version: v2.0.0 @@ -8135,7 +8144,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------- Dependency: golang.org/x/lint -Revision: fdd1cda4f05f +Revision: 910be7a94367 License type (autodetected): BSD-3-Clause ./vendor/golang.org/x/lint/LICENSE: -------------------------------------------------------------------- @@ -8167,6 +8176,41 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------- +Dependency: golang.org/x/mod +Version: v0.1.1 +Revision: c90efee705ee +License type (autodetected): BSD-3-Clause +./vendor/golang.org/x/mod/LICENSE: +-------------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + -------------------------------------------------------------------- Dependency: golang.org/x/net Revision: 16171245cfb2 @@ -8373,7 +8417,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------- Dependency: golang.org/x/tools -Revision: 7b8e75db28f4 +Revision: b320d3a0f5a2 License type (autodetected): BSD-3-Clause ./vendor/golang.org/x/tools/LICENSE: -------------------------------------------------------------------- diff --git a/go.mod b/go.mod index b7d86005a3d..ff8759afeea 100644 --- a/go.mod +++ b/go.mod @@ -58,6 +58,7 @@ require ( github.com/eclipse/paho.mqtt.golang v1.2.1-0.20200121105743-0d940dd29fd2 github.com/elastic/ecs v1.5.0 github.com/elastic/elastic-agent-client/v7 v7.0.0-20200601155656-d6a9eb4f6d07 + github.com/elastic/go-concert v0.0.2 github.com/elastic/go-libaudit/v2 v2.0.0-20200515221334-92371bef3fb8 github.com/elastic/go-licenser v0.2.1 github.com/elastic/go-lookslike v0.3.0 @@ -150,14 +151,14 @@ require ( go.uber.org/multierr v1.3.0 go.uber.org/zap v1.14.0 golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 - golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f + golang.org/x/lint v0.0.0-20200130185559-910be7a94367 golang.org/x/net v0.0.0-20200202094626-16171245cfb2 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e golang.org/x/text v0.3.2 golang.org/x/time v0.0.0-20191024005414-555d28b269f0 - golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4 + golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2 google.golang.org/api v0.15.0 google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb google.golang.org/grpc v1.29.1 diff --git a/go.sum b/go.sum index 5d93791c0ce..9f55b113025 100644 --- a/go.sum +++ b/go.sum @@ -227,6 +227,8 @@ github.com/elastic/elastic-agent-client/v7 v7.0.0-20200601155656-d6a9eb4f6d07 h1 github.com/elastic/elastic-agent-client/v7 v7.0.0-20200601155656-d6a9eb4f6d07/go.mod h1:uh/Gj9a0XEbYoM4NYz4LvaBVARz3QXLmlNjsrKY9fTc= 
github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 h1:cWPqxlPtir4RoQVCpGSRXmLqjEHpJKbR60rxh1nQZY4= github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270/go.mod h1:Msl1pdboCbArMF/nSCDUXgQuWTeoMmE/z8607X+k7ng= +github.com/elastic/go-concert v0.0.2 h1:hJb9h99LS/lyjf7pE1wQ+eiNw+0CXVLCJR42yx+AvOQ= +github.com/elastic/go-concert v0.0.2/go.mod h1:9MtFarjXroUgmm0m6HY3NSe1XiKhdktiNRRj9hWvIaM= github.com/elastic/go-libaudit/v2 v2.0.0-20200515221334-92371bef3fb8 h1:Jcnojiuok7Ea5hitJK9VWmBigganE2MMETOH0VZasEA= github.com/elastic/go-libaudit/v2 v2.0.0-20200515221334-92371bef3fb8/go.mod h1:j2CZcVcluWDGhQTnq1SOPy1NKEIa74FtQ39Nnz87Jxk= github.com/elastic/go-licenser v0.2.1 h1:K76YI6XR2LRpewLGwhrTqasXZcNJG2yHY4/jit/IXGY= @@ -697,6 +699,7 @@ go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.3.0 h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= @@ -735,10 +738,13 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -843,9 +849,13 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191227053925-7b8e75db28f4 h1:Toz2IK7k8rbltAXwNAxKcn9OzqyNfMUhUNjz3sL0NMk=
 golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2 h1:0sfSpGSa544Fwnbot3Oxq/U6SXqjty6Jy/3wRhVS7ig=
+golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
diff --git a/libbeat/statestore/backend/backend.go b/libbeat/statestore/backend/backend.go
new file mode 100644
index 00000000000..c40d8515977
--- /dev/null
+++ b/libbeat/statestore/backend/backend.go
@@ -0,0 +1,71 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package backend
+
+// Registry provides access to stores managed by the backend storage.
+type Registry interface {
+    // Access opens a store. The store will be closed by the frontend once all
+    // users of the store have closed it.
+    //
+    // The Store instance returned must be thread-safe.
+    Access(name string) (Store, error)
+
+    // Close is called on shutdown after all stores have been closed.
+    // A Registry implementation is not required to verify that all stores
+    // have indeed been closed.
+    Close() error
+}
+
+// ValueDecoder is used to decode values into Go structs or maps within a transaction.
+// A ValueDecoder is assumed to be invalidated by Beats after the loop operation has returned.
+type ValueDecoder interface {
+    Decode(to interface{}) error
+}
+
+// Store provides access to key-value pairs.
+type Store interface {
+    // Close should close the store and release all used resources.
+    Close() error
+
+    // Has checks if the key exists. If the key does not exist, no error must
+    // be returned and the bool result must be false.
+    // An error return value must indicate internal errors only. The store is
+    // assumed to be in a 'bad' but recoverable state if 'Has' fails.
+    Has(key string) (bool, error)
+
+    // Get decodes the value for the given key into value.
+    // Besides internal implementation-specific errors, an error is assumed
+    // to be returned if the key does not exist or if the type of the value
+    // passed is incompatible with the actual value in the store (decoding error).
+    Get(key string, value interface{}) error
+
+    // Set inserts or overwrites a key-value pair in the store.
+    // The `value` parameter can be assumed to be a struct or a map. Besides
+    // internal implementation-specific errors, an error should be returned if
+    // the given value can not be encoded.
+    Set(key string, value interface{}) error
+
+    // Remove removes an entry from the store.
+    Remove(string) error
+
+    // Each loops over all key-value pairs in the store, calling fn for each pair.
+    // The ValueDecoder is used by fn to optionally decode the value into a
+    // custom struct or map. The decoder must be executable multiple times, but
+    // is assumed to be invalidated once fn returns.
+    // The loop shall return if fn returns an error or false.
+    Each(fn func(string, ValueDecoder) (bool, error)) error
+}
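
To make the backend contract above concrete, here is a minimal sketch of a backend satisfying these interfaces with an in-memory map and JSON encoding. It is illustrative only: the memback/memRegistry/memStore names are invented for this example, it is not the memlog backend announced in the commit message, and a real backend would also provide persistence and stricter error semantics.

package memback

import (
    "encoding/json"
    "fmt"
    "sync"

    "github.com/elastic/beats/v7/libbeat/statestore/backend"
)

// memRegistry hands out named in-memory stores.
type memRegistry struct {
    mu     sync.Mutex
    stores map[string]*memStore
}

// memStore keeps JSON-encoded values keyed by string.
type memStore struct {
    mu   sync.RWMutex
    data map[string][]byte
}

// jsonDecoder defers decoding until the Each callback asks for it.
type jsonDecoder struct{ raw []byte }

// New creates an illustrative in-memory backend registry.
func New() backend.Registry { return &memRegistry{stores: map[string]*memStore{}} }

func (r *memRegistry) Access(name string) (backend.Store, error) {
    r.mu.Lock()
    defer r.mu.Unlock()
    s, ok := r.stores[name]
    if !ok {
        s = &memStore{data: map[string][]byte{}}
        r.stores[name] = s
    }
    return s, nil
}

func (r *memRegistry) Close() error { return nil }

func (s *memStore) Close() error { return nil }

func (s *memStore) Has(key string) (bool, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()
    _, ok := s.data[key]
    return ok, nil
}

func (s *memStore) Get(key string, to interface{}) error {
    s.mu.RLock()
    defer s.mu.RUnlock()
    raw, ok := s.data[key]
    if !ok {
        return fmt.Errorf("key %q not found", key)
    }
    return json.Unmarshal(raw, to)
}

func (s *memStore) Set(key string, from interface{}) error {
    enc, err := json.Marshal(from)
    if err != nil {
        return err
    }
    s.mu.Lock()
    defer s.mu.Unlock()
    s.data[key] = enc
    return nil
}

func (s *memStore) Remove(key string) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    delete(s.data, key)
    return nil
}

func (s *memStore) Each(fn func(string, backend.ValueDecoder) (bool, error)) error {
    s.mu.RLock()
    defer s.mu.RUnlock()
    for k, raw := range s.data {
        cont, err := fn(k, jsonDecoder{raw: raw})
        if err != nil || !cont {
            return err
        }
    }
    return nil
}

func (d jsonDecoder) Decode(to interface{}) error { return json.Unmarshal(d.raw, to) }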
diff --git a/libbeat/statestore/error.go b/libbeat/statestore/error.go
new file mode 100644
index 00000000000..eac8bc83f38
--- /dev/null
+++ b/libbeat/statestore/error.go
@@ -0,0 +1,91 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package statestore
+
+import (
+    "errors"
+    "fmt"
+)
+
+// ErrorAccess indicates that an error occurred when trying to open a Store.
+type ErrorAccess struct {
+    name  string
+    cause error
+}
+
+// Store reports the name of the store that could not be accessed.
+func (e *ErrorAccess) Store() string { return e.name }
+
+// Unwrap returns the cause of the error, or nil if the cause is unknown or
+// has not been reported by the backend.
+func (e *ErrorAccess) Unwrap() error { return e.cause }
+
+// Error creates a descriptive error string.
+func (e *ErrorAccess) Error() string {
+    if e.cause == nil {
+        return fmt.Sprintf("failed to open store '%v'", e.name)
+    }
+    return fmt.Sprintf("failed to open store '%v': %v", e.name, e.cause)
+}
+
+// ErrorClosed indicates that the operation failed because the store has already been closed.
+type ErrorClosed struct {
+    name      string
+    operation string
+}
+
+// Store reports the name of the store that has been closed.
+func (e *ErrorClosed) Store() string { return e.name }
+
+// Operation returns a 'readable' name for the operation that failed to access the closed store.
+func (e *ErrorClosed) Operation() string { return e.operation }
+
+// Error creates a descriptive error string.
+func (e *ErrorClosed) Error() string {
+    return fmt.Sprintf("can not execute %v operation on closed store '%v'", e.operation, e.name)
+}
+
+// ErrorOperation is returned when a generic store operation fails.
+type ErrorOperation struct {
+    name      string
+    operation string
+    cause     error
+}
+
+// Store reports the name of the store.
+func (e *ErrorOperation) Store() string { return e.name }
+
+// Operation returns a 'readable' name for the operation that failed.
+func (e *ErrorOperation) Operation() string { return e.operation }
+
+// Unwrap returns the cause of the failure.
+func (e *ErrorOperation) Unwrap() error { return e.cause }
+
+// Error creates a descriptive error string.
+func (e *ErrorOperation) Error() string {
+    return fmt.Sprintf("failed in %v operation on store '%v': %v", e.operation, e.name, e.cause)
+}
+
+// IsClosed returns true if the error was caused by the store having been closed (ErrorClosed).
+func IsClosed(err error) bool {
+    var tmp *ErrorClosed
+    if errors.As(err, &tmp) {
+        return true
+    }
+    return false
+}
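
Callers can classify these errors with errors.As or the IsClosed helper. A small usage sketch; the handleStoreErr helper and its log messages are illustrative, not part of this change:

package example

import (
    "errors"
    "log"

    "github.com/elastic/beats/v7/libbeat/statestore"
)

// handleStoreErr classifies statestore errors using only the exported API.
func handleStoreErr(err error) {
    if err == nil {
        return
    }
    if statestore.IsClosed(err) {
        log.Println("store already closed, ignoring")
        return
    }

    var accessErr *statestore.ErrorAccess
    if errors.As(err, &accessErr) {
        log.Printf("could not open store %q: %v", accessErr.Store(), accessErr.Unwrap())
        return
    }

    var opErr *statestore.ErrorOperation
    if errors.As(err, &opErr) {
        log.Printf("%v operation on store %q failed: %v", opErr.Operation(), opErr.Store(), opErr.Unwrap())
        return
    }

    log.Printf("unexpected error: %v", err)
}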
diff --git a/libbeat/statestore/registry.go b/libbeat/statestore/registry.go
new file mode 100644
index 00000000000..aca20c7d91e
--- /dev/null
+++ b/libbeat/statestore/registry.go
@@ -0,0 +1,87 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package statestore
+
+import (
+    "sync"
+
+    "github.com/elastic/beats/v7/libbeat/statestore/backend"
+)
+
+// Registry manages multiple key-value stores.
+// When working with a registry, one must access a store. Depending on the
+// backend, a store can be an index, a table, or a directory. All access to a
+// store is handled by transactions.
+type Registry struct {
+    backend backend.Registry
+
+    mu     sync.Mutex
+    active map[string]*sharedStore // active/open stores
+    wg     sync.WaitGroup
+}
+
+// ValueDecoder is used to decode values retrieved from an actual store. A
+// ValueDecoder instance is valid for the lifetime of the transaction only.
+type ValueDecoder = backend.ValueDecoder
+
+// NewRegistry creates a new Registry with a configured backend.
+func NewRegistry(backend backend.Registry) *Registry {
+    return &Registry{
+        backend: backend,
+        active:  map[string]*sharedStore{},
+    }
+}
+
+// Close closes the backend storage. Close blocks until all stores in use are closed.
+func (r *Registry) Close() error {
+    r.wg.Wait() // wait for all stores to be closed
+    return r.backend.Close()
+}
+
+// Get opens a shared store. A store is closed and released only after all its
+// users have closed the store.
+func (r *Registry) Get(name string) (*Store, error) {
+    r.mu.Lock()
+    defer r.mu.Unlock()
+
+    shared := r.active[name]
+    if shared == nil {
+        backend, err := r.backend.Access(name)
+        if err != nil {
+            return nil, &ErrorAccess{name: name, cause: err}
+        }
+
+        shared = newSharedStore(r, name, backend)
+        defer shared.Release()
+
+        r.active[name] = shared
+        r.wg.Add(1)
+    }
+
+    return newStore(shared), nil
+}
+
+func (r *Registry) unregisterStore(s *sharedStore) {
+    _, exists := r.active[s.name]
+    if !exists {
+        panic("removing an unknown store")
+    }
+
+    delete(r.active, s.name)
+    r.wg.Done()
+}
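
A usage sketch of the intended lifecycle, assuming some backend.Registry implementation is available (none ships with this change): every user requests its own handle via Get, handles are reference counted, and Registry.Close blocks until the last handle is gone. The runWithStore function, the store name "filebeat", and the stored value are placeholders.

package example

import (
    "github.com/elastic/beats/v7/libbeat/statestore"
    "github.com/elastic/beats/v7/libbeat/statestore/backend"
)

// runWithStore shows the intended lifecycle: one Registry per backend,
// one Store handle per user, and reference-counted shutdown.
func runWithStore(b backend.Registry) error {
    reg := statestore.NewRegistry(b)
    defer reg.Close() // blocks until every handle obtained via Get has been closed

    store, err := reg.Get("filebeat")
    if err != nil {
        return err // *statestore.ErrorAccess
    }
    defer store.Close() // releases this handle; the backend store closes with the last one

    return store.Set("example-key", map[string]interface{}{"offset": 10})
}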
diff --git a/libbeat/statestore/store.go b/libbeat/statestore/store.go
new file mode 100644
index 00000000000..a776efa8f7b
--- /dev/null
+++ b/libbeat/statestore/store.go
@@ -0,0 +1,176 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package statestore
+
+import (
+    "github.com/elastic/beats/v7/libbeat/statestore/backend"
+    "github.com/elastic/go-concert/atomic"
+    "github.com/elastic/go-concert/unison"
+)
+
+type sharedStore struct {
+    reg      *Registry
+    refCount atomic.Int
+
+    name    string
+    backend backend.Store
+}
+
+// Store instance. The backend is shared between multiple instances of this store.
+// The backend will be closed only after the last instance has been closed.
+// No transaction can be created once the store instance has been closed.
+// A Store is not thread-safe. Each go-routine accessing a store should create
+// an instance using `Registry.Get`.
+type Store struct {
+    shared *sharedStore
+    // wait group to ensure active operations can finish, but no new ones can
+    // be started once the store has been closed.
+    active unison.SafeWaitGroup
+}
+
+func newSharedStore(reg *Registry, name string, backend backend.Store) *sharedStore {
+    return &sharedStore{
+        reg:      reg,
+        refCount: atomic.MakeInt(1),
+        name:     name,
+        backend:  backend,
+    }
+}
+
+func newStore(shared *sharedStore) *Store {
+    shared.Retain()
+    return &Store{
+        shared: shared,
+    }
+}
+
+// Close deactivates the current store. No new transaction can be started.
+// Already active transactions will continue to function until closed.
+// The backing store will be closed once all stores and active transactions have been closed.
+func (s *Store) Close() error {
+    if err := s.active.Add(1); err != nil {
+        return &ErrorClosed{operation: "store/close", name: s.shared.name}
+    }
+    s.active.Close()
+    s.active.Done()
+
+    s.active.Wait()
+    return s.shared.Release()
+}
+
+// Has checks if the given key exists.
+// Has returns an error if the store has already been closed or the storage backend returns an error.
+func (s *Store) Has(key string) (bool, error) {
+    const operation = "store/has"
+    if err := s.active.Add(1); err != nil {
+        return false, &ErrorClosed{operation: operation, name: s.shared.name}
+    }
+    defer s.active.Done()
+
+    has, err := s.shared.backend.Has(key)
+    if err != nil {
+        return false, &ErrorOperation{name: s.shared.name, operation: operation, cause: err}
+    }
+    return has, nil
+}
+
+// Get unpacks the value for a given key into "into".
+// Get returns an error if the store has already been closed, the key does not
+// exist, or the storage backend returns an error.
+func (s *Store) Get(key string, into interface{}) error {
+    const operation = "store/get"
+    if err := s.active.Add(1); err != nil {
+        return &ErrorClosed{operation: operation, name: s.shared.name}
+    }
+    defer s.active.Done()
+
+    err := s.shared.backend.Get(key, into)
+    if err != nil {
+        return &ErrorOperation{name: s.shared.name, operation: operation, cause: err}
+    }
+    return nil
+}
+
+// Set inserts or overwrites a key-value pair.
+// Set returns an error if the store has been closed, the value can not be
+// encoded by the store, or the storage backend fails.
+func (s *Store) Set(key string, from interface{}) error {
+    const operation = "store/set"
+    if err := s.active.Add(1); err != nil {
+        return &ErrorClosed{operation: operation, name: s.shared.name}
+    }
+    defer s.active.Done()
+
+    if err := s.shared.backend.Set(key, from); err != nil {
+        return &ErrorOperation{name: s.shared.name, operation: operation, cause: err}
+    }
+    return nil
+}
+
+// Remove removes a key-value pair from the store. Remove does not error if the
+// key is unknown to the store.
+// An error is returned if the store has already been closed or the operation
+// itself fails in the storage backend.
+func (s *Store) Remove(key string) error {
+    const operation = "store/remove"
+    if err := s.active.Add(1); err != nil {
+        return &ErrorClosed{operation: operation, name: s.shared.name}
+    }
+    defer s.active.Done()
+
+    if err := s.shared.backend.Remove(key); err != nil {
+        return &ErrorOperation{name: s.shared.name, operation: operation, cause: err}
+    }
+    return nil
+}
+
+// Each iterates over all key-value pairs in the store.
+// The iteration stops if fn returns false or an error value != nil.
+// If the store has already been closed, an error is returned.
+func (s *Store) Each(fn func(string, ValueDecoder) (bool, error)) error {
+    if err := s.active.Add(1); err != nil {
+        return &ErrorClosed{operation: "store/each", name: s.shared.name}
+    }
+    defer s.active.Done()
+
+    return s.shared.backend.Each(fn)
+}
+
+func (s *sharedStore) Retain() {
+    s.refCount.Inc()
+}
+
+func (s *sharedStore) Release() error {
+    if s.refCount.Dec() == 0 && s.tryUnregister() {
+        return s.backend.Close()
+    }
+    return nil
+}
+
+// tryUnregister removes the store from the registry. tryUnregister returns
+// false if the store has been retained in the meantime. True is returned if
+// the store can safely be closed.
+func (s *sharedStore) tryUnregister() bool {
+    s.reg.mu.Lock()
+    defer s.reg.mu.Unlock()
+    if s.refCount.Load() > 0 {
+        return false
+    }
+
+    s.reg.unregisterStore(s)
+    return true
+}
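
Each hands the callback a ValueDecoder, so values are only decoded on demand. A sketch, assuming a hypothetical cursorState value type; the dumpStates helper is illustrative and not part of this change:

package example

import (
    "fmt"

    "github.com/elastic/beats/v7/libbeat/statestore"
)

// cursorState is a hypothetical value type an input might persist per key.
type cursorState struct {
    Offset int64
    Source string
}

// dumpStates lists all entries, decoding each value lazily through the
// ValueDecoder handed to the callback.
func dumpStates(store *statestore.Store) error {
    return store.Each(func(key string, dec statestore.ValueDecoder) (bool, error) {
        var st cursorState
        if err := dec.Decode(&st); err != nil {
            return false, err // stop the iteration on decode errors
        }
        fmt.Printf("%s -> offset=%d source=%s\n", key, st.Offset, st.Source)
        return true, nil // continue with the next pair
    })
}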
diff --git a/vendor/github.com/elastic/go-concert/.gitignore b/vendor/github.com/elastic/go-concert/.gitignore
new file mode 100644
index 00000000000..1d74e21965c
--- /dev/null
+++ b/vendor/github.com/elastic/go-concert/.gitignore
@@ -0,0 +1 @@
+.vscode/
diff --git a/vendor/github.com/elastic/go-concert/.travis.yml b/vendor/github.com/elastic/go-concert/.travis.yml
new file mode 100644
index 00000000000..207a49a519c
--- /dev/null
+++ b/vendor/github.com/elastic/go-concert/.travis.yml
@@ -0,0 +1,22 @@
+language: go
+
+os:
+  - linux
+  - osx
+  - windows
+
+go:
+  - '1.12'
+  - '1.13'
+  - '1.14'
+
+# Check we're testing the correct commit (Snippet from: https://github.com/travis-ci/travis-ci/issues/7459#issuecomment-287040521)
+before_install:
+  - |
+    if [[ ("$TRAVIS_COMMIT" != "$(git rev-parse HEAD)") && ("$TRAVIS_COMMIT" != "$(git rev-parse HEAD^2 2>/dev/null)") ]]; then
+      echo "Commit $(git rev-parse HEAD) doesn't match expected commit $TRAVIS_COMMIT"
+    fi
+
+script:
+- go env
+- go test "./..." -v
diff --git a/vendor/github.com/elastic/go-concert/CHANGELOG.md b/vendor/github.com/elastic/go-concert/CHANGELOG.md
new file mode 100644
index 00000000000..51bd38554fd
--- /dev/null
+++ b/vendor/github.com/elastic/go-concert/CHANGELOG.md
@@ -0,0 +1,32 @@
+# Change Log
+All notable changes to this project will be documented in this file.
+This project adheres to [Semantic Versioning](http://semver.org/). + +## [Unreleased] + +### Added + +### Changed + +### Deprecated + +### Removed + +### Fixed + +## [0.0.2] + +### Added + +- Add MultiErrGroup (#20). +- Add Group interface and TaskGroup implementation (#23). +- Add SafeWaitGroup (#23). +- Add ClosedGroup (#24). + +### Changed + +- FromCancel returns original context.Context, if input implements this type. Deadline and Value will not be ignored anymore. (#22) + + +[Unreleased]: https://github.com/elastic/go-ucfg/compare/v0.0.2...HEAD +[0.0.2]: https://github.com/elastic/go-ucfg/compare/v0.0.1...v0.0.2 diff --git a/vendor/github.com/elastic/go-concert/LICENSE b/vendor/github.com/elastic/go-concert/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/elastic/go-concert/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/go-concert/atomic/atomic.go b/vendor/github.com/elastic/go-concert/atomic/atomic.go new file mode 100644 index 00000000000..09e83614184 --- /dev/null +++ b/vendor/github.com/elastic/go-concert/atomic/atomic.go @@ -0,0 +1,94 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package atomic provides common primitive types with atomic accessors. +package atomic + +import a "sync/atomic" + +// Bool provides an atomic boolean type. +type Bool struct{ u Uint32 } + +// Int32 provides an atomic int32 type. +type Int32 struct{ value int32 } + +// Int64 provides an atomic int64 type. +type Int64 struct{ value int64 } + +// Uint32 provides an atomic uint32 type. +type Uint32 struct{ value uint32 } + +// Uint64 provides an atomic uint64 type. 
+type Uint64 struct{ value uint64 } + +func MakeBool(v bool) Bool { return Bool{MakeUint32(encBool(v))} } +func NewBool(v bool) *Bool { return &Bool{MakeUint32(encBool(v))} } +func (b *Bool) Load() bool { return b.u.Load() == 1 } +func (b *Bool) Store(v bool) { b.u.Store(encBool(v)) } +func (b *Bool) Swap(new bool) bool { return b.u.Swap(encBool(new)) == 1 } +func (b *Bool) CAS(old, new bool) bool { return b.u.CAS(encBool(old), encBool(new)) } + +func MakeInt32(v int32) Int32 { return Int32{v} } +func NewInt32(v int32) *Int32 { return &Int32{v} } +func (i *Int32) Load() int32 { return a.LoadInt32(&i.value) } +func (i *Int32) Store(v int32) { a.StoreInt32(&i.value, v) } +func (i *Int32) Swap(new int32) int32 { return a.SwapInt32(&i.value, new) } +func (i *Int32) Add(delta int32) int32 { return a.AddInt32(&i.value, delta) } +func (i *Int32) Sub(delta int32) int32 { return a.AddInt32(&i.value, -delta) } +func (i *Int32) Inc() int32 { return i.Add(1) } +func (i *Int32) Dec() int32 { return i.Add(-1) } +func (i *Int32) CAS(old, new int32) bool { return a.CompareAndSwapInt32(&i.value, old, new) } + +func MakeInt64(v int64) Int64 { return Int64{v} } +func NewInt64(v int64) *Int64 { return &Int64{v} } +func (i *Int64) Load() int64 { return a.LoadInt64(&i.value) } +func (i *Int64) Store(v int64) { a.StoreInt64(&i.value, v) } +func (i *Int64) Swap(new int64) int64 { return a.SwapInt64(&i.value, new) } +func (i *Int64) Add(delta int64) int64 { return a.AddInt64(&i.value, delta) } +func (i *Int64) Sub(delta int64) int64 { return a.AddInt64(&i.value, -delta) } +func (i *Int64) Inc() int64 { return i.Add(1) } +func (i *Int64) Dec() int64 { return i.Add(-1) } +func (i *Int64) CAS(old, new int64) bool { return a.CompareAndSwapInt64(&i.value, old, new) } + +func MakeUint32(v uint32) Uint32 { return Uint32{v} } +func NewUint32(v uint32) *Uint32 { return &Uint32{v} } +func (u *Uint32) Load() uint32 { return a.LoadUint32(&u.value) } +func (u *Uint32) Store(v uint32) { a.StoreUint32(&u.value, v) } +func (u *Uint32) Swap(new uint32) uint32 { return a.SwapUint32(&u.value, new) } +func (u *Uint32) Add(delta uint32) uint32 { return a.AddUint32(&u.value, delta) } +func (u *Uint32) Sub(delta uint32) uint32 { return a.AddUint32(&u.value, ^uint32(delta-1)) } +func (u *Uint32) Inc() uint32 { return u.Add(1) } +func (u *Uint32) Dec() uint32 { return u.Add(^uint32(0)) } +func (u *Uint32) CAS(old, new uint32) bool { return a.CompareAndSwapUint32(&u.value, old, new) } + +func MakeUint64(v uint64) Uint64 { return Uint64{v} } +func NewUint64(v uint64) *Uint64 { return &Uint64{v} } +func (u *Uint64) Load() uint64 { return a.LoadUint64(&u.value) } +func (u *Uint64) Store(v uint64) { a.StoreUint64(&u.value, v) } +func (u *Uint64) Swap(new uint64) uint64 { return a.SwapUint64(&u.value, new) } +func (u *Uint64) Add(delta uint64) uint64 { return a.AddUint64(&u.value, delta) } +func (u *Uint64) Sub(delta uint64) uint64 { return a.AddUint64(&u.value, ^uint64(delta-1)) } +func (u *Uint64) Inc() uint64 { return u.Add(1) } +func (u *Uint64) Dec() uint64 { return u.Add(^uint64(0)) } +func (u *Uint64) CAS(old, new uint64) bool { return a.CompareAndSwapUint64(&u.value, old, new) } + +func encBool(b bool) uint32 { + if b { + return 1 + } + return 0 +} diff --git a/vendor/github.com/elastic/go-concert/atomic/atomic32.go b/vendor/github.com/elastic/go-concert/atomic/atomic32.go new file mode 100644 index 00000000000..17c41392e69 --- /dev/null +++ b/vendor/github.com/elastic/go-concert/atomic/atomic32.go @@ -0,0 +1,50 @@ +// Licensed to 
Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build 386 arm mips mipsle + +package atomic + +// atomic Uint/Int for 32bit systems + +// Uint provides an architecture specific atomic uint. +type Uint struct{ a Uint32 } + +// Int provides an architecture specific atomic uint. +type Int struct{ a Int32 } + +func MakeUint(v uint) Uint { return Uint{MakeUint32(uint32(v))} } +func NewUint(v uint) *Uint { return &Uint{MakeUint32(uint32(v))} } +func (u *Uint) Load() uint { return uint(u.a.Load()) } +func (u *Uint) Store(v uint) { u.a.Store(uint32(v)) } +func (u *Uint) Swap(new uint) uint { return uint(u.a.Swap(uint32(new))) } +func (u *Uint) Add(delta uint) uint { return uint(u.a.Add(uint32(delta))) } +func (u *Uint) Sub(delta uint) uint { return uint(u.a.Add(uint32(-delta))) } +func (u *Uint) Inc() uint { return uint(u.a.Inc()) } +func (u *Uint) Dec() uint { return uint(u.a.Dec()) } +func (u *Uint) CAS(old, new uint) bool { return u.a.CAS(uint32(old), uint32(new)) } + +func MakeInt(v int) Int { return Int{MakeInt32(int32(v))} } +func NewInt(v int) *Int { return &Int{MakeInt32(int32(v))} } +func (i *Int) Load() int { return int(i.a.Load()) } +func (i *Int) Store(v int) { i.a.Store(int32(v)) } +func (i *Int) Swap(new int) int { return int(i.a.Swap(int32(new))) } +func (i *Int) Add(delta int) int { return int(i.a.Add(int32(delta))) } +func (i *Int) Sub(delta int) int { return int(i.a.Add(int32(-delta))) } +func (i *Int) Inc() int { return int(i.a.Inc()) } +func (i *Int) Dec() int { return int(i.a.Dec()) } +func (i *Int) CAS(old, new int) bool { return i.a.CAS(int32(old), int32(new)) } diff --git a/vendor/github.com/elastic/go-concert/atomic/atomic64.go b/vendor/github.com/elastic/go-concert/atomic/atomic64.go new file mode 100644 index 00000000000..5d7de52026a --- /dev/null +++ b/vendor/github.com/elastic/go-concert/atomic/atomic64.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// +build amd64 arm64 ppc64 ppc64le mips64 mips64le s390x + +package atomic + +// atomic Uint/Int for 64bit systems + +// Uint provides an architecture specific atomic uint. +type Uint struct{ a Uint64 } + +// Int provides an architecture specific atomic uint. +type Int struct{ a Int64 } + +func MakeUint(v uint) Uint { return Uint{MakeUint64(uint64(v))} } +func NewUint(v uint) *Uint { return &Uint{MakeUint64(uint64(v))} } +func (u *Uint) Load() uint { return uint(u.a.Load()) } +func (u *Uint) Store(v uint) { u.a.Store(uint64(v)) } +func (u *Uint) Swap(new uint) uint { return uint(u.a.Swap(uint64(new))) } +func (u *Uint) Add(delta uint) uint { return uint(u.a.Add(uint64(delta))) } +func (u *Uint) Sub(delta uint) uint { return uint(u.a.Add(uint64(-delta))) } +func (u *Uint) Inc() uint { return uint(u.a.Inc()) } +func (u *Uint) Dec() uint { return uint(u.a.Dec()) } +func (u *Uint) CAS(old, new uint) bool { return u.a.CAS(uint64(old), uint64(new)) } + +func MakeInt(v int) Int { return Int{MakeInt64(int64(v))} } +func NewInt(v int) *Int { return &Int{MakeInt64(int64(v))} } +func (i *Int) Load() int { return int(i.a.Load()) } +func (i *Int) Store(v int) { i.a.Store(int64(v)) } +func (i *Int) Swap(new int) int { return int(i.a.Swap(int64(new))) } +func (i *Int) Add(delta int) int { return int(i.a.Add(int64(delta))) } +func (i *Int) Sub(delta int) int { return int(i.a.Add(int64(-delta))) } +func (i *Int) Inc() int { return int(i.a.Inc()) } +func (i *Int) Dec() int { return int(i.a.Dec()) } +func (i *Int) CAS(old, new int) bool { return i.a.CAS(int64(old), int64(new)) } diff --git a/vendor/github.com/elastic/go-concert/chorus/closeref.go b/vendor/github.com/elastic/go-concert/chorus/closeref.go new file mode 100644 index 00000000000..058b2a34c3c --- /dev/null +++ b/vendor/github.com/elastic/go-concert/chorus/closeref.go @@ -0,0 +1,143 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package chorus + +import ( + "errors" + "sync" + "time" +) + +// CloserFunc is the function called by the Closer on `Close()`. +type CloserFunc func() + +// ErrClosed is returned when the Closer is closed. +var ErrClosed = errors.New("closer is closed") + +// CloseRef implements a subset of the context.Context interface and it's use to synchronize +// the shutdown of multiple go-routines. +type CloseRef interface { + Done() <-chan struct{} + Err() error + Deadline() (time.Time, bool) + Value(key interface{}) interface{} +} + +// Closer implements a shutdown strategy when dealing with multiples go-routines, it creates a tree +// of Closer, when you call `Close()` on a parent the `Close()` method will be called on the current +// closer and any of the childs it may have and will remove the current node from the parent. 
+// +// NOTE: The `Close()` is reentrant but will propagate the close only once. +type Closer struct { + mu sync.Mutex + done chan struct{} + err error + parent *Closer + children map[*Closer]struct{} + callback CloserFunc +} + +// Close closes the closes and propagates the close to any child, on close the close callback will +// be called, this can be used for custom cleanup like closing a TCP socket. +func (c *Closer) Close() { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return + } + + if c.callback != nil { + c.callback() + } + + close(c.done) + + // propagate close to children. + if c.children != nil { + for child := range c.children { + child.Close() + } + c.children = nil + } + + c.err = ErrClosed + c.mu.Unlock() + + if c.parent != nil { + c.removeChild(c) + } +} + +// Done returns the synchronization channel, the channel will be closed if `Close()` was called on +// the current node or any parent it may have. +func (c *Closer) Done() <-chan struct{} { + return c.done +} + +// Err returns an error if the Closer was already closed. +func (c *Closer) Err() error { + c.mu.Lock() + err := c.err + c.mu.Unlock() + return err +} + +// Deadline implements the Deadline() method of the context.Context interface but will always return +// false. +func (c *Closer) Deadline() (time.Time, bool) { + return time.Time{}, false +} + +// Value implements the Value() method of the contxt.Context interface +func (c *Closer) Value(key interface{}) interface{} { + return nil +} + +func (c *Closer) removeChild(child *Closer) { + c.mu.Lock() + delete(c.children, child) + c.mu.Unlock() +} + +func (c *Closer) addChild(child *Closer) { + c.mu.Lock() + if c.children == nil { + c.children = make(map[*Closer]struct{}) + } + c.children[child] = struct{}{} + c.mu.Unlock() +} + +// WithCloser wraps a new closer into a child of an existing closer. +func WithCloser(parent *Closer, fn CloserFunc) *Closer { + child := &Closer{ + done: make(chan struct{}), + parent: parent, + callback: fn, + } + parent.addChild(child) + return child +} + +// NewCloser creates a new Closer. 
+func NewCloser(fn CloserFunc) *Closer { + return &Closer{ + done: make(chan struct{}), + callback: fn, + } +} diff --git a/vendor/github.com/elastic/go-concert/go.mod b/vendor/github.com/elastic/go-concert/go.mod new file mode 100644 index 00000000000..30da5937450 --- /dev/null +++ b/vendor/github.com/elastic/go-concert/go.mod @@ -0,0 +1,12 @@ +module github.com/elastic/go-concert + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/stretchr/testify v1.4.0 + github.com/urso/sderr v0.0.0-20200210124243-c2a16f3d43ec + go.uber.org/goleak v1.0.0 + golang.org/x/lint v0.0.0-20200130185559-910be7a94367 // indirect + golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2 // indirect +) diff --git a/vendor/github.com/elastic/go-concert/go.sum b/vendor/github.com/elastic/go-concert/go.sum new file mode 100644 index 00000000000..2a0dabfc98a --- /dev/null +++ b/vendor/github.com/elastic/go-concert/go.sum @@ -0,0 +1,54 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797 h1:OHNw/6pXODJAB32NujjdQO/KIYQ3KAbHQfCzH81XdCs= +github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797/go.mod h1:pNWFTeQ+V1OYT/TzWpnWb6eQBdoXpdx+H+lrH97/Oyo= +github.com/urso/sderr v0.0.0-20200210124243-c2a16f3d43ec h1:HkZIDJrMKZHPsYhmH2XjTTSk1pbMCFfpxSnyzZUFm+k= +github.com/urso/sderr v0.0.0-20200210124243-c2a16f3d43ec/go.mod h1:Wp40HwmjM59FkDIVFfcCb9LzBbnc0XAMp8++hJuWvSU= +go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= +go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= 
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11 h1:Yq9t9jnGoR+dBuitxdo9l6Q7xh/zOyNnYUtDKaQ3x0E= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2 h1:0sfSpGSa544Fwnbot3Oxq/U6SXqjty6Jy/3wRhVS7ig= +golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/elastic/go-concert/nocopy.go b/vendor/github.com/elastic/go-concert/nocopy.go new file mode 100644 index 00000000000..914cda8ad6e --- /dev/null +++ b/vendor/github.com/elastic/go-concert/nocopy.go @@ -0,0 +1,24 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package concert + +// Types using with noCopy will be flagged by `go vet`. +// See: https://github.com/golang/go/issues/8005#issuecomment-190753527 +type noCopy struct{} + +func (*noCopy) Lock() {} diff --git a/vendor/github.com/elastic/go-concert/refcount.go b/vendor/github.com/elastic/go-concert/refcount.go new file mode 100644 index 00000000000..3574e3507dc --- /dev/null +++ b/vendor/github.com/elastic/go-concert/refcount.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package concert + +import ( + "sync" + + "github.com/elastic/go-concert/atomic" +) + +// RefCount is an atomic reference counter. It can be used to track a shared +// resource it's lifetime and execute an action once it is clear the resource is +// not needed anymore. +// +// The zero value of RefCount is already in a valid state, which can be +// Released already. +type RefCount struct { + count atomic.Uint32 + + errMux sync.Mutex + err error + + Action func(err error) + OnError func(old, new error) error +} + +// refCountFree indicates when a RefCount.Release shall return true. It's +// chosen such that the zero value of RefCount is a valid value which will +// return true if Release is called without calling Retain before. +const refCountFree uint32 = ^uint32(0) +const refCountOops uint32 = refCountFree - 1 + +// Retain increases the ref count. +func (c *RefCount) Retain() { + if c.count.Inc() == 0 { + panic("retaining released ref count") + } +} + +// Release decreases the reference count. It returns true, if the reference count +// has reached a 'free' state. +// Releasing a reference count in a free state will trigger a panic. +// If an Action is configured, then this action will be run once the +// refcount becomes free. +func (c *RefCount) Release() bool { + switch c.count.Dec() { + case refCountFree: + if c.Action != nil { + c.Action(c.err) + } + return true + case refCountOops: + panic("ref count released too often") + default: + return false + } +} + +// Err returns the current error stored by the reference counter. +func (c *RefCount) Err() error { + c.errMux.Lock() + defer c.errMux.Unlock() + return c.err +} + +// Fail adds an error to the reference counter. +// OnError will be called if configured, so to compute the actual error. 
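As a sketch of how RefCount is meant to be used (the worker loop is illustrative; the zero value counts as one reference held by the creator):

    package main

    import (
        "fmt"
        "sync"

        concert "github.com/elastic/go-concert"
    )

    func main() {
        ref := &concert.RefCount{
            Action: func(err error) { fmt.Println("all references released, err =", err) },
        }

        var wg sync.WaitGroup
        for i := 0; i < 3; i++ {
            ref.Retain() // one additional reference per worker
            wg.Add(1)
            go func() {
                defer wg.Done()
                defer ref.Release() // Action runs when the last reference is dropped
                // ... use the shared resource ...
            }()
        }

        ref.Release() // drop the creator's implicit reference
        wg.Wait()
    }
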
+// If OnError is not configured, the first error reported will be stored by +// the reference counter only. +// +// Fail releases the reference counter. +func (c *RefCount) Fail(err error) bool { + // use dummy function to handle the error, ensuring errMux.Unlock will be + // executed before the call to Release or in case OnError panics. + func() { + c.errMux.Lock() + defer c.errMux.Unlock() + if c.OnError != nil { + c.err = c.OnError(c.err, err) + } else if c.err == nil { + c.err = err + } + }() + + return c.Release() +} diff --git a/vendor/github.com/elastic/go-concert/signal.go b/vendor/github.com/elastic/go-concert/signal.go new file mode 100644 index 00000000000..097aaddc6d9 --- /dev/null +++ b/vendor/github.com/elastic/go-concert/signal.go @@ -0,0 +1,115 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package concert + +import ( + "context" + "sync" +) + +// OnceSignaler provides a channel that can only be closed once. +// In addition to the channel one can install callbacks to be executed +// if the signal is triggered. +// Once triggered all further close attempts will be ignored. +// +// The zero value is not valid. NewOnceSignaler must be used to create an +// instance backed by a channel. +type OnceSignaler struct { + once sync.Once + mu sync.Mutex + ch chan struct{} + fn func() +} + +// Canceled is the error returned when the signaler has been triggered. +var Canceled = context.Canceled + +var closedChan = func() <-chan struct{} { + ch := make(chan struct{}) + close(ch) + return ch +}() + +// ClosedChan returns a closed read only channel. +func ClosedChan() <-chan struct{} { + return closedChan +} + +// NewOnceSignaler create a new OnceSignaler. +func NewOnceSignaler() *OnceSignaler { + return &OnceSignaler{ + ch: make(chan struct{}), + } +} + +// OnSignal installs a callback that will be executed if the signal is +// triggered. The callback will be called immediately if the signal has already +// been triggered. +func (s *OnceSignaler) OnSignal(fn func()) { + s.mu.Lock() + defer s.mu.Unlock() + + select { + case <-s.ch: + fn() + default: + s.add(fn) + } +} + +func (s *OnceSignaler) add(fn func()) { + old := s.fn + if old == nil { + s.fn = fn + } else { + s.fn = func() { + old() + fn() + } + } +} + +// Trigger triggers the signal, closing the channel returned by Done and +// calling all callbacks. +func (s *OnceSignaler) Trigger() { + s.once.Do(func() { + s.mu.Lock() + defer s.mu.Unlock() + + close(s.ch) + if s.fn != nil { + s.fn() + s.fn = nil + } + }) +} + +// Done returns a channel one can listen on to check the the signaler has already been triggered. 
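A small sketch of the OnceSignaler API above (illustrative; the module root is imported under its `concert` package name):

    package main

    import (
        "fmt"

        concert "github.com/elastic/go-concert"
    )

    func main() {
        sig := concert.NewOnceSignaler()

        // Callbacks run exactly once, when the signal is first triggered.
        sig.OnSignal(func() { fmt.Println("shutdown requested") })

        sig.Trigger() // runs the callback and closes the Done channel
        sig.Trigger() // subsequent triggers are ignored

        <-sig.Done()
        fmt.Println("err:", sig.Err()) // err: context canceled
    }
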
+func (s *OnceSignaler) Done() <-chan struct{} { + return s.ch +} + +// Err reports an Canceled event if the signaler has been triggered already +func (s *OnceSignaler) Err() error { + select { + case <-s.ch: + return Canceled + default: + return nil + } +} diff --git a/vendor/github.com/elastic/go-concert/unison/lockmanager.go b/vendor/github.com/elastic/go-concert/unison/lockmanager.go new file mode 100644 index 00000000000..b314ceb1e74 --- /dev/null +++ b/vendor/github.com/elastic/go-concert/unison/lockmanager.go @@ -0,0 +1,436 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package unison + +import ( + "errors" + "runtime" + "sync" + "time" + + "github.com/elastic/go-concert" + "github.com/elastic/go-concert/atomic" +) + +// LockManager gives access to a set of Locks by name. The lock manager can +// forcefully unlock a lock. Routines using a managed lock can use the +// LockSession to list for special Lock events. +// +// The zero value of LockManager is directly usable, but a LockManager must no +// be copied by value. +type LockManager struct { + initOnce sync.Once + mu sync.Mutex + table map[string]*lockEntry +} + +// ManagedLock is a mutex like structure that is managed by the LockManager. +// A managed lock can loose it's lock lease at any time. The LockX methods +// return a LockSession that can be used to listen for the current Locks state +// changes. +// +// The lock will automatically be released in case the ManagedLock is garbage +// collected. One should not rely on this behavior, but releaseing a zombie +// lock guarantees that other routines might eventually be able to make +// progress in case of fatal errors. +type ManagedLock struct { + key string + manager *LockManager + session *LockSession + entry *lockEntry +} + +// LockOption is used to pass additonal settings to all LockX methods of the ManagedLock. +type LockOption interface { + apply(l *ManagedLock) +} + +// LockSession provides signal with the current lock state. Lock sessions must +// not be reused, as each Lock operation returns a new Session object. +type LockSession struct { + isLocked atomic.Bool + done, unlocked, lost *concert.OnceSignaler +} + +// WithSignalCallbacks is a LockOption that configures additional callbacks to +// be executed on lock session state changes. +// A LockSession is valid until the lock has been Unlocked. The callbacks are +// registered with the lock session, and will not be called anymore once the LockSession is finalized. +type WithSignalCallbacks struct { + // Done is executed after the Unlocked or Lost event has been emitted. Done will never be executed twice, + // even if both events get emitted. + Done func() + + // Unlocked is called when the lock has been explicitely unlocked. 
+ Unlocked func() + + // Lost is called when the Lock was force unlocked by the LockManager. + Lost func() +} + +// lockEntry is the shared lock instances that all ManagedLocks refer too. +// The lockEntry is held in the LockManagers table for as long as at least one +// ManagedLock holds the lock or is attempting to acquire the lock. +// The lockEntry is supposed to be created lazily and shall be deleted from the +// LockManager as early as possible. +type lockEntry struct { + session *LockSession + muInternal sync.Mutex // internal mutex + + // shared user mutex + Mutex + + // book keeping, so we can remove the entry from the lock manager if there + // are not more references to this entry. + key string + ref concert.RefCount +} + +// GC Finalizer for the ManagedLock. This variable is used for testing. +var managedLockFinalizer = (*ManagedLock).finalize + +// NewLockManager creates a new LockManager instance. +func NewLockManager() *LockManager { + m := &LockManager{} + m.init() + return m +} + +func (m *LockManager) init() { + m.initOnce.Do(func() { + m.table = map[string]*lockEntry{} + }) +} + +// Access gives access to a ManagedLock. The ManagedLock MUST NOT be used by +// more than one go-routine. If 2 go-routines need to coordinate on a lock +// managed by the same LockManager, then 2 individual ManagedLock instances for +// the same key must be created. +func (m *LockManager) Access(key string) *ManagedLock { + m.init() + return newManagedLock(m, key) +} + +// ForceUnlock unlocks the ManagedLock that is currently holding the Lock. +// It is advised to listen on the LockSession.LockLost or LockSession.Done events. +func (m *LockManager) ForceUnlock(key string) { + m.init() + + m.mu.Lock() + entry := m.findEntry(key) + m.mu.Unlock() + + entry.muInternal.Lock() + session := entry.session + if session != nil { + entry.session = nil + if session.isLocked.Load() { + entry.Mutex.Unlock() + } + + } + entry.muInternal.Unlock() + + session.forceUnlock() + m.releaseEntry(entry) +} + +// ForceUnlockAll force unlocks all current locks that are managed by this lock manager. +func (m *LockManager) ForceUnlockAll() { + m.init() + + m.mu.Lock() + for _, entry := range m.table { + entry.muInternal.Lock() + session := entry.session + if session != nil && session.isLocked.Load() { + entry.session = nil + entry.Mutex.Unlock() + } + entry.muInternal.Unlock() + + session.forceUnlock() + } + m.mu.Unlock() +} + +func (m *LockManager) createEntry(key string) *lockEntry { + entry := &lockEntry{ + Mutex: MakeMutex(), + key: key, + } + m.table[key] = entry + return entry +} + +func (m *LockManager) findEntry(key string) *lockEntry { + entry := m.table[key] + if entry != nil { + entry.ref.Retain() + } + return entry +} + +func (m *LockManager) findOrCreate(key string, create bool) (entry *lockEntry) { + m.mu.Lock() + defer m.mu.Unlock() + if entry = m.findEntry(key); entry == nil && create { + entry = m.createEntry(key) + } + return entry +} + +func (m *LockManager) releaseEntry(entry *lockEntry) { + m.mu.Lock() + defer m.mu.Unlock() + if entry.ref.Release() { + delete(m.table, entry.key) + } +} + +func newManagedLock(mngr *LockManager, key string) *ManagedLock { + return &ManagedLock{key: key, manager: mngr} +} + +func (ml *ManagedLock) finalize() { + defer ml.unlink() + if ml.session != nil { + ml.doUnlock() + } +} + +// Key reports the key the lock will lock/unlock. +func (ml *ManagedLock) Key() string { + return ml.key +} + +// Lock the key. It blocks until the lock becomes +// available. 
+// Lock returns a LockSession, which is valid until after Unlock is called. +// +// Note: After loosing a lock, the ManagedLock must still call 'Unlock' in +// order to be reusable. +func (ml *ManagedLock) Lock(opts ...LockOption) *LockSession { + checkNoActiveLockSession(ml.session) + + ml.link(true) + ml.entry.Lock() + return ml.markLocked(opts) +} + +// TryLock attempts to acquire the lock. If the lock is already held by another +// shared lock, then TryLock will return false. +// +// On success a LockSession will be returned as well. The Lock session is valid +// until Unlock has been called. +func (ml *ManagedLock) TryLock(opts ...LockOption) (*LockSession, bool) { + checkNoActiveLockSession(ml.session) + + ml.link(true) + if !ml.entry.TryLock() { + ml.unlink() + return nil, false + } + + return ml.markLocked(opts), true +} + +// LockTimeout will try to acquire lock. A failed lock attempt +// returns false, once the amount of configured duration has been passed. +// +// If duration is 0, then the call behaves like TryLock. +// If duration is <0, then the call behaves like Lock +// +// On success a LockSession will be returned as well. The Lock session is valid +// until Unlock has been called. +func (ml *ManagedLock) LockTimeout(duration time.Duration, opts ...LockOption) (*LockSession, bool) { + checkNoActiveLockSession(ml.session) + + ml.link(true) + if !ml.entry.LockTimeout(duration) { + ml.unlink() + return nil, false + } + return ml.markLocked(opts), ml.IsLocked() +} + +// LockContext tries to acquire the lock. The Log operation can be cancelled by +// the context. LockContext returns nil on success, otherwise the error value +// returned by context.Err, which MUST NOT return nil after cancellation. +// +// On success a LockSession will be returned as well. The Lock session is valid +// until Unlock has been called. +func (ml *ManagedLock) LockContext(context doneContext, opts ...LockOption) (*LockSession, error) { + checkNoActiveLockSession(ml.session) + + ml.link(true) + err := ml.entry.LockContext(context) + if err != nil { + ml.unlink() + return nil, err + } + + return ml.markLocked(opts), nil +} + +// Unlock releases a resource. +func (ml *ManagedLock) Unlock() { + checkActiveLockSession(ml.session) + ml.doUnlock() +} + +func (ml *ManagedLock) doUnlock() { + session, entry := ml.session, ml.entry + + // The lock can be forcefully and asynchronously unreleased by the + // LockManager. We can only unlock the entry, iff our mutex session is + // still locked and the entries lock session still matches our lock session. + // If none of these is the case, then the session was already closed. + entry.muInternal.Lock() + if session == entry.session { + entry.session = nil + if session.isLocked.Load() { + entry.Unlock() + } + } + entry.muInternal.Unlock() + + // always signal unlock, independent of the current state of the registry. + // This will trigger the 'Unlocked' signal, indicating to session listeners that + // the routine holding the lock deliberately unlocked the ManagedLock. + // Note: a managed lock must always be explicitely unlocked, no matter of the + // session state. + session.unlock() + + ml.unlink() + ml.markUnlocked() +} + +// IsLocked checks if the resource currently holds the lock for the key +func (ml *ManagedLock) IsLocked() bool { + return ml.session != nil && ml.session.isLocked.Load() +} + +// link ensures that the managed lock is 'linked' to the shared lockEntry in +// the LockManagers table. 
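A compact sketch of the ManagedLock workflow described above (the key name is illustrative):

    package main

    import (
        "fmt"

        "github.com/elastic/go-concert/unison"
    )

    func main() {
        mgr := unison.NewLockManager()

        // Each goroutine uses its own ManagedLock, even for the same key.
        a := mgr.Access("resource")
        b := mgr.Access("resource")

        session := a.Lock() // valid until Unlock is called
        _ = session         // can be watched for unlock/lost events

        if _, ok := b.TryLock(); !ok {
            fmt.Println("resource is already locked")
        }

        fmt.Println("a holds the lock:", a.IsLocked())
        a.Unlock()
    }
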
+func (ml *ManagedLock) link(create bool) { + if ml.entry == nil { + ml.entry = ml.manager.findOrCreate(ml.key, create) + } +} + +// unlink removes the references to the table entry. +func (ml *ManagedLock) unlink() { + if ml.entry == nil { + return + } + + entry := ml.entry + ml.entry = nil + ml.manager.releaseEntry(entry) +} + +func (ml *ManagedLock) markLocked(opts []LockOption) *LockSession { + session := newLockSession() + ml.session = session + + for i := range opts { + opts[i].apply(ml) + } + + ml.entry.muInternal.Lock() + ml.entry.session = session + ml.entry.muInternal.Unlock() + + // in case we miss an unlock operation (programmer error or panic that has + // been caught) we set a finalizer to eventually free the resource. + // The Unlock operation will unsert the finalizer. + runtime.SetFinalizer(ml, managedLockFinalizer) + return session +} + +func (ml *ManagedLock) markUnlocked() { + runtime.SetFinalizer(ml, nil) +} + +func newLockSession() *LockSession { + return &LockSession{ + isLocked: atomic.MakeBool(true), + done: concert.NewOnceSignaler(), + unlocked: concert.NewOnceSignaler(), + lost: concert.NewOnceSignaler(), + } +} + +// Done returns a channel to wait for a final signal. The signal will become available +// if the session has been finished due to an Unlock or Forced Unlock. +func (s *LockSession) Done() <-chan struct{} { + if s == nil { + return concert.ClosedChan() + } + return s.done.Done() +} + +// Unlocked returns a channel, that will signal that the ManagedLock was +// unlocked. A ManagedLock can still be unlocked (which will trigger the +// signal), even after loosing the actual Lock. +func (s *LockSession) Unlocked() <-chan struct{} { + if s == nil { + return concert.ClosedChan() + } + return s.unlocked.Done() +} + +// LockLost return a channel, that will signal that the ManagedLock has lost +// its lock status. When receiving this signal, ongoing operations should be +// cancelled, or results should be ignored, as other MangedLocks might be able +// to acquire the lock the moment the current session has lost the lock. +func (s *LockSession) LockLost() <-chan struct{} { + if s == nil { + return concert.ClosedChan() + } + return s.lost.Done() +} + +func (s *LockSession) unlock() { s.doUnlock(s.unlocked) } +func (s *LockSession) forceUnlock() { s.doUnlock(s.lost) } +func (s *LockSession) doUnlock(kind *concert.OnceSignaler) { + s.isLocked.Store(false) + kind.Trigger() + s.done.Trigger() +} + +func (opt WithSignalCallbacks) apply(lock *ManagedLock) { + lock.session.done.OnSignal(opt.Done) + lock.session.unlocked.OnSignal(opt.Unlocked) + lock.session.lost.OnSignal(opt.Lost) +} + +func checkNoActiveLockSession(s *LockSession) { + invariant(s == nil, "lock still has an active lock session, missing call to Unlock to finish the session") +} + +func checkActiveLockSession(s *LockSession) { + invariant(s != nil, "no active lock session") +} + +func invariant(b bool, message string) { + if !b { + panic(errors.New(message)) + } +} diff --git a/vendor/github.com/elastic/go-concert/unison/multierrgroup.go b/vendor/github.com/elastic/go-concert/unison/multierrgroup.go new file mode 100644 index 00000000000..14cac6496c0 --- /dev/null +++ b/vendor/github.com/elastic/go-concert/unison/multierrgroup.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package unison + +import ( + "context" + "sync" +) + +// MultiErrGroup is a collection of goroutines working on subtasks +// concurrently. The group waits until all subtasks have finished and collects +// all errors encountered. +// +// The zero value of MultiErrGroup is a valid group. +type MultiErrGroup struct { + mu sync.Mutex + errs []error + wg sync.WaitGroup +} + +// Go starts a new go-routine, collecting errors encounted into the +// MultiErrGroup. +func (g *MultiErrGroup) Go(fn func() error) { + g.wg.Add(1) + go func() { + defer g.wg.Done() + err := fn() + if err != nil && err != context.Canceled { + g.mu.Lock() + defer g.mu.Unlock() + g.errs = append(g.errs, err) + } + }() +} + +// Wait waits until all go-routines have been stopped and returns all errors +// encountered. +func (g *MultiErrGroup) Wait() []error { + g.wg.Wait() + g.mu.Lock() + defer g.mu.Unlock() + return g.errs +} diff --git a/vendor/github.com/elastic/go-concert/unison/mutex.go b/vendor/github.com/elastic/go-concert/unison/mutex.go new file mode 100644 index 00000000000..bbbbf2dcb44 --- /dev/null +++ b/vendor/github.com/elastic/go-concert/unison/mutex.go @@ -0,0 +1,134 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package unison + +import ( + "time" +) + +// Mutex provides a mutex based on go channels. The lock operations support +// timeout or cancellation by a context. Moreover one can try to lock the mutex +// from within a select statement when using Await. +// +// The zero value of Mutex will not be able to Lock the mutex ever. The Lock +// method will never return. Calling Unlock will panic. +type Mutex struct { + ch chan struct{} +} + +// doneContext is a subset of context.Context, to allow more restrained +// cancellation types as well. +type doneContext interface { + Done() <-chan struct{} + Err() error +} + +// MakeMutex creates a mutex. +func MakeMutex() Mutex { + ch := make(chan struct{}, 1) + ch <- struct{}{} + return Mutex{ch: ch} +} + +// Lock blocks until the mutex has been acquired. +// The zero value of Mutex will block forever. +func (c Mutex) Lock() { + <-c.ch +} + +// LockTimeout will try to lock the mutex. 
A failed lock attempt +// returns false, once the amount of configured duration has been passed. +// +// If duration is 0, then the call behaves like TryLock. +// If duration is <0, then the call behaves like Lock if the Mutex has been +// initialized, otherwise fails. +// +// The zero value of Mutex will never succeed. +func (c Mutex) LockTimeout(duration time.Duration) bool { + switch { + case duration == 0: + return c.TryLock() + case duration < 0: + if c.ch == nil { + return false + } + c.Lock() + return true + } + + timer := time.NewTimer(duration) + select { + case <-c.ch: + timer.Stop() + return true + case <-timer.C: + select { + case <-c.ch: // still lock, if timer and lock occured at the same time + return true + default: + return false + } + } +} + +// LockContext tries to lock the mutex. The Log operation can be cancelled by +// the context. LockContext returns nil on success, otherwise the error value +// returned by context.Err, which MUST NOT return nil after cancellation. +func (c Mutex) LockContext(context doneContext) error { + select { + case <-context.Done(): + return context.Err() + default: + } + + select { + case <-c.ch: + return nil + case <-context.Done(): + return context.Err() + } +} + +// TryLock attempts to lock the mutex. If the mutex has been already locked +// false is returned. +func (c Mutex) TryLock() bool { + select { + case <-c.ch: + return true + default: + return false + } +} + +// Await returns a channel that will be triggered if the lock attempt did succeed. +// One can use the channel with select-case. The mutex is assumed to be locked if +// the branch waiting on the mutex has been triggered. +func (c Mutex) Await() <-chan struct{} { + return c.ch +} + +// Unlock unlocks the mutex. +// +// The zero value of Mutex will panic. +func (c Mutex) Unlock() { + select { + case c.ch <- struct{}{}: + default: + panic("unlock on unlocked mutex") + } +} diff --git a/vendor/github.com/elastic/go-concert/unison/safewaitgroup.go b/vendor/github.com/elastic/go-concert/unison/safewaitgroup.go new file mode 100644 index 00000000000..3e3a7e2cc10 --- /dev/null +++ b/vendor/github.com/elastic/go-concert/unison/safewaitgroup.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package unison + +import ( + "errors" + "sync" +) + +// SafeWaitGroup provides a safe alternative to WaitGroup, that instead of +// panicing returns an error when Wait has been called. +type SafeWaitGroup struct { + mu sync.RWMutex + wg sync.WaitGroup + closed bool +} + +// ErrGroupClosed indicates that the WaitGroup is currently closed, and no more +// routines can be started. +var ErrGroupClosed = errors.New("group closed") + +// Add adds the delta to the WaitGroup counter. 
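For illustration, a sketch of the channel-based Mutex above combined with a cancellable lock attempt (the timeouts are arbitrary):

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/elastic/go-concert/unison"
    )

    func main() {
        mu := unison.MakeMutex()

        mu.Lock()
        go func() {
            time.Sleep(50 * time.Millisecond)
            mu.Unlock()
        }()

        // Give up after 200ms if the mutex is never released.
        ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
        defer cancel()

        if err := mu.LockContext(ctx); err != nil {
            fmt.Println("gave up:", err)
            return
        }
        fmt.Println("acquired after the background goroutine released it")
        mu.Unlock()
    }
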
+// If the counter becomes 0, all goroutines are blocked on Wait will continue. +// +// Add returns an error if 'Wait' has already been called, indicating that no more +// go-routines should be started. +func (s *SafeWaitGroup) Add(n int) error { + if n < 0 { + s.wg.Add(n) + return nil + } + + s.mu.RLock() + defer s.mu.RUnlock() + if s.closed { + return ErrGroupClosed + } + + s.wg.Add(n) + return nil +} + +// Done decrements the WaitGroup counter. +func (s *SafeWaitGroup) Done() { + s.wg.Done() +} + +// Close marks the wait group as closed. All calls to Add will fail with ErrGroupClosed after +// close has been called. Close does not wait until the WaitGroup counter has +// reached zero, but will return immediately. Use Wait to wait for the counter to become 0. +func (s *SafeWaitGroup) Close() { + s.mu.Lock() + defer s.mu.Unlock() + s.closed = true +} + +// Wait closes the WaitGroup and blocks until the WaitGroup counter is zero. +// Add will return errors the moment 'Wait' has been called. +func (s *SafeWaitGroup) Wait() { + s.Close() + s.wg.Wait() +} diff --git a/vendor/github.com/elastic/go-concert/unison/taskgroup.go b/vendor/github.com/elastic/go-concert/unison/taskgroup.go new file mode 100644 index 00000000000..7faab4fae18 --- /dev/null +++ b/vendor/github.com/elastic/go-concert/unison/taskgroup.go @@ -0,0 +1,142 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package unison + +import ( + "sync" + + "github.com/elastic/go-concert/chorus" + "github.com/urso/sderr" +) + +// Group interface, that can be used to start tasks. The tasks started will +// spawn go-routines, and will get a shutdown signal by the provided Canceler. +type Group interface { + // Go method returns an error if the task can not be started. The error + // returned by the task itself is not supposed to be returned, as the error is + // assumed to be generated asynchronously. + Go(fn func(Canceler) error) error +} + +// Canceler interface, that can be used to pass along some shutdown signal to +// child goroutines. +type Canceler interface { + Done() <-chan struct{} + Err() error +} + +type closedGroup struct { + err error +} + +// ClosedGroup creates a Group that always fails to start a go-routine. +// Go will return reportedError on each attempt to create a go routine. +// If reportedError is nil, ErrGroupClosed will be used. +func ClosedGroup(reportedError error) Group { + if reportedError == nil { + reportedError = ErrGroupClosed + } + return &closedGroup{err: reportedError} +} + +func (c closedGroup) Go(_ func(Canceler) error) error { + return c.err +} + +// TaskGroup implements the Group interface. Once the group is shutting down, +// no more goroutines can be created via Go. +// The Stop method of TaskGroup will block until all sub-tasks have returned. 
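A short sketch of SafeWaitGroup above; unlike sync.WaitGroup, Add reports an error instead of racing with Wait:

    package main

    import (
        "fmt"

        "github.com/elastic/go-concert/unison"
    )

    func main() {
        var wg unison.SafeWaitGroup
        results := make(chan int, 3)

        for i := 0; i < 3; i++ {
            if err := wg.Add(1); err != nil {
                break // ErrGroupClosed: shutdown already started
            }
            go func(n int) {
                defer wg.Done()
                results <- n * n
            }(i)
        }

        wg.Wait() // closes the group, then waits for running workers
        close(results)
        for r := range results {
            fmt.Println("result:", r)
        }

        if err := wg.Add(1); err == unison.ErrGroupClosed {
            fmt.Println("no new workers can be started")
        }
    }
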
+// Errors from sub-tasks are collected. The Stop method collects all errors and returns +// a single error summarizing all errors encountered. +// +// By default sub-tasks continue running if any task did encounter an error. +// This behavior can be modified by setting StopOnError. +// +// The zero value of TaskGroup is fully functional. StopOnError must not be set after +// the first go-routine has been spawned. +type TaskGroup struct { + // StopOnError configures the behavior when a sub-task failed. If not set + // all other tasks will continue to run. If the function return true, a + // shutdown signal is passed, and Go will fail on attempts to start new + // tasks. + StopOnError func(err error) bool + + mu sync.Mutex + errs []error + wg SafeWaitGroup + + initOnce sync.Once + closer *chorus.Closer +} + +var _ Group = (*TaskGroup)(nil) + +// init initializes internal state the first time the group is actively used. +func (t *TaskGroup) init() { + t.initOnce.Do(func() { + t.closer = chorus.NewCloser(nil) + }) +} + +// Go starts a new go-routine and passes a Canceler to signal group shutdown. +// Errors returned by the function are collected and finally returned on Stop. +// If the group was stopped before calling Go, then Go will return the +// ErrGroupClosed error. +func (t *TaskGroup) Go(fn func(Canceler) error) error { + t.init() + + if err := t.wg.Add(1); err != nil { + return err + } + + go func() { + defer t.wg.Done() + err := fn(t.closer) + if err != nil { + t.mu.Lock() + t.errs = append(t.errs, err) + t.mu.Unlock() + + if t.StopOnError != nil && t.StopOnError(err) { + t.wg.Close() + t.closer.Close() + } + } + }() + + return nil +} + +func (t *TaskGroup) wait() []error { + t.wg.Wait() + t.mu.Lock() + defer t.mu.Unlock() + return t.errs +} + +// Stop sends a shutdown signal to all tasks, and waits for them to finish. +// It returns an error that contains all errors encountered. +func (t *TaskGroup) Stop() error { + t.init() + t.closer.Close() + errs := t.wait() + if len(errs) > 0 { + return sderr.WrapAll(errs, "task failures") + } + return nil +} diff --git a/vendor/github.com/elastic/go-concert/unison/txlock.go b/vendor/github.com/elastic/go-concert/unison/txlock.go new file mode 100644 index 00000000000..cf1edb8d344 --- /dev/null +++ b/vendor/github.com/elastic/go-concert/unison/txlock.go @@ -0,0 +1,192 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package unison + +import "sync" + +// TxLock provides locking support for transactional updates with multiple concurrent readers +// and one writer. Unlike sync.RWLock, the writer and readers can coexist. +// Users of TxLock must ensure proper isolation, between writers/readers. +// Changes by the writer must not be accessible by readers yet. 
A writer should +// hold the exclusive lock before making the changes available to others. +// +// Lock types: +// - Shared: Shared locks are used by readonly loads. Multiple readers +// can co-exist with one active writer. +// - Reserved: Writer use the reserved lock. Once locked no other +// writer can acquire the Reserved lock. The shared lock can +// still be locked by concurrent readers. +// - Pending: The pending lock is used by the writer to signal +// a write is about to be committed. After acquiring the pending +// lock no new reader is allowed to acquire the shared lock. +// Readers will have to wait until the pending lock is released. +// Existing readers can still coexist, but no new reader is +// allowed. +// - Exclusive: Once the exclusive lock is acquired by a write transaction, +// No other active transactions/locks exist anymore. +// Locking the exclusive lock blocks until the shared lock has been +// released by all readers. +// +// Each Locktype can be accessed using `(*lock).()`. Each lock type +// implements a `Lock` and `Unlock` method. +// +// The zero value of TxLock must not be used. +type TxLock struct { + mu sync.Mutex + + // conditions + mutexes + shared *sync.Cond + exclusive *sync.Cond + reserved sync.Mutex + + // state + sharedCount uint + reservedSet bool + pendingSet bool +} + +// TxSharedLock is used by readers to lock the shared lock on a TxLock. +type TxSharedLock TxLock + +// TxReservedLock is used by writers to lock the reserved lock on a TxLock. +// Only one go-routine is allowed to acquire the reserved lock at a time. +type TxReservedLock TxLock + +// TxPendingLock is used by writers to signal the TxLock that the shared lock +// can not be acquired anymore. Readers will unblock once Unlock on the pending +// lock is called. +type TxPendingLock TxLock + +// TxExclusiveLock is used by writers to wait for exclusive access to all resources. +// The writer should make changes visible to future readers only after acquiring the +// exclusive lock. +type TxExclusiveLock TxLock + +// NewTxLock creates a new initialized TxLock instance. +func NewTxLock() *TxLock { + l := &TxLock{} + l.shared = sync.NewCond(&l.mu) + l.exclusive = sync.NewCond(&l.mu) + return l +} + +// Get returns the standard Locker for the given transaction type. +func (l *TxLock) Get(readonly bool) sync.Locker { + if readonly { + return l.Shared() + } + return l.Reserved() +} + +// Shared returns the files shared locker. +func (l *TxLock) Shared() *TxSharedLock { return (*TxSharedLock)(l) } + +// Reserved returns the files reserved locker. +func (l *TxLock) Reserved() *TxReservedLock { return (*TxReservedLock)(l) } + +// Pending returns the files pending locker. +func (l *TxLock) Pending() *TxPendingLock { return (*TxPendingLock)(l) } + +// Exclusive returns the files exclusive locker. +func (l *TxLock) Exclusive() *TxExclusiveLock { return (*TxExclusiveLock)(l) } + +// Lock locks the shared lock. It blocks as long as the pending lock +// is in use. +func (l *TxSharedLock) Lock() { + waitCond(l.shared, l.check, l.inc) +} + +// Unlock unlocks the shared lock. Unlocking potentially unblocks a waiting +// exclusive lock. +func (l *TxSharedLock) Unlock() { + withLocker(&l.mu, l.dec) +} + +func (l *TxSharedLock) check() bool { return !l.pendingSet } +func (l *TxSharedLock) inc() { l.sharedCount++ } +func (l *TxSharedLock) dec() { + l.sharedCount-- + if l.sharedCount == 0 { + l.exclusive.Signal() + } +} + +// Lock acquires the reserved lock. 
Only one go-routine can hold the reserved +// lock at a time. The reserved lock should only be acquire by writers. +// Writers must not acquire the shared lock. +func (l *TxReservedLock) Lock() { + l.reserved.Lock() + l.reservedSet = true +} + +// Unlock releases the reserved lock. +func (l *TxReservedLock) Unlock() { + l.reservedSet = false + l.reserved.Unlock() +} + +// Lock acquires the pending lock. The reserved lock must be acquired before. +func (l *TxPendingLock) Lock() { + l.mu.Lock() + defer l.mu.Unlock() + + if !l.reservedSet { + panic("reserved lock must be set when acquiring the pending lock") + } + + l.pendingSet = true +} + +// Unlock releases the pending lock, potentially unblocking waiting readers. +func (l *TxPendingLock) Unlock() { + l.mu.Lock() + l.pendingSet = false + l.mu.Unlock() + l.shared.Broadcast() +} + +// Lock acquires the exclusive lock. Once acquired it is guaranteed that no +// other reader or writer go-routine exists. +func (l *TxExclusiveLock) Lock() { + if !l.pendingSet { + panic("the pending lock must be set when acquiring the exclusive lock") + } + waitCond(l.exclusive, l.check, func() {}) +} + +// Unlock is a noop. It guarantees that TxExclusiveLock is compatible to sync.Locker. +func (l *TxExclusiveLock) Unlock() {} + +func (l *TxExclusiveLock) check() bool { + return l.sharedCount == 0 +} + +func waitCond(c *sync.Cond, check func() bool, upd func()) { + withLocker(c.L, func() { + for !check() { + c.Wait() + } + upd() + }) +} + +func withLocker(l sync.Locker, fn func()) { + l.Lock() + defer l.Unlock() + fn() +} diff --git a/vendor/golang.org/x/lint/go.mod b/vendor/golang.org/x/lint/go.mod index 44179f3a422..b32309c45fd 100644 --- a/vendor/golang.org/x/lint/go.mod +++ b/vendor/golang.org/x/lint/go.mod @@ -2,4 +2,4 @@ module golang.org/x/lint go 1.11 -require golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f +require golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 diff --git a/vendor/golang.org/x/lint/go.sum b/vendor/golang.org/x/lint/go.sum index 539c98a94a9..2ad45cae246 100644 --- a/vendor/golang.org/x/lint/go.sum +++ b/vendor/golang.org/x/lint/go.sum @@ -1,8 +1,12 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f h1:kDxGY2VmgABOe55qheT/TFqUMtcTHnomIPS1iv3G4Ms= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 h1:EBZoQjiKKPaLbPrbpssUfuHtwM6KV/vb4U85g/cigFY= +golang.org/x/tools 
v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/vendor/golang.org/x/mod/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/mod/PATENTS b/vendor/golang.org/x/mod/PATENTS new file mode 100644 index 00000000000..733099041f8 --- /dev/null +++ b/vendor/golang.org/x/mod/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. 
diff --git a/vendor/golang.org/x/tools/internal/module/module.go b/vendor/golang.org/x/mod/module/module.go similarity index 58% rename from vendor/golang.org/x/tools/internal/module/module.go rename to vendor/golang.org/x/mod/module/module.go index 9a4edb9dec1..21f123957da 100644 --- a/vendor/golang.org/x/tools/internal/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -2,8 +2,86 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package module defines the module.Version type -// along with support code. +// Package module defines the module.Version type along with support code. +// +// The module.Version type is a simple Path, Version pair: +// +// type Version struct { +// Path string +// Version string +// } +// +// There are no restrictions imposed directly by use of this structure, +// but additional checking functions, most notably Check, verify that +// a particular path, version pair is valid. +// +// Escaped Paths +// +// Module paths appear as substrings of file system paths +// (in the download cache) and of web server URLs in the proxy protocol. +// In general we cannot rely on file systems to be case-sensitive, +// nor can we rely on web servers, since they read from file systems. +// That is, we cannot rely on the file system to keep rsc.io/QUOTE +// and rsc.io/quote separate. Windows and macOS don't. +// Instead, we must never require two different casings of a file path. +// Because we want the download cache to match the proxy protocol, +// and because we want the proxy protocol to be possible to serve +// from a tree of static files (which might be stored on a case-insensitive +// file system), the proxy protocol must never require two different casings +// of a URL path either. +// +// One possibility would be to make the escaped form be the lowercase +// hexadecimal encoding of the actual path bytes. This would avoid ever +// needing different casings of a file path, but it would be fairly illegible +// to most programmers when those paths appeared in the file system +// (including in file paths in compiler errors and stack traces) +// in web server logs, and so on. Instead, we want a safe escaped form that +// leaves most paths unaltered. +// +// The safe escaped form is to replace every uppercase letter +// with an exclamation mark followed by the letter's lowercase equivalent. +// +// For example, +// +// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. +// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy +// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. +// +// Import paths that avoid upper-case letters are left unchanged. +// Note that because import paths are ASCII-only and avoid various +// problematic punctuation (like : < and >), the escaped form is also ASCII-only +// and avoids the same problematic punctuation. +// +// Import paths have never allowed exclamation marks, so there is no +// need to define how to escape a literal !. +// +// Unicode Restrictions +// +// Today, paths are disallowed from using Unicode. +// +// Although paths are currently disallowed from using Unicode, +// we would like at some point to allow Unicode letters as well, to assume that +// file systems and URLs are Unicode-safe (storing UTF-8), and apply +// the !-for-uppercase convention for escaping them in the file system. +// But there are at least two subtle considerations. 
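The escaping convention described in the package comment above can be restated as a small helper (a toy transcription for illustration, not the package's own implementation):

    package main

    import (
        "fmt"
        "strings"
    )

    // escapePath replaces every uppercase ASCII letter with '!' followed by
    // its lowercase equivalent, as described in the module package comment.
    func escapePath(path string) string {
        var b strings.Builder
        for _, r := range path {
            if 'A' <= r && r <= 'Z' {
                b.WriteByte('!')
                b.WriteRune(r - 'A' + 'a')
            } else {
                b.WriteRune(r)
            }
        }
        return b.String()
    }

    func main() {
        fmt.Println(escapePath("github.com/Azure/azure-sdk-for-go"))
        // github.com/!azure/azure-sdk-for-go
    }
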
+// +// First, note that not all case-fold equivalent distinct runes +// form an upper/lower pair. +// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) +// are three distinct runes that case-fold to each other. +// When we do add Unicode letters, we must not assume that upper/lower +// are the only case-equivalent pairs. +// Perhaps the Kelvin symbol would be disallowed entirely, for example. +// Or perhaps it would escape as "!!k", or perhaps as "(212A)". +// +// Second, it would be nice to allow Unicode marks as well as letters, +// but marks include combining marks, and then we must deal not +// only with case folding but also normalization: both U+00E9 ('é') +// and U+0065 U+0301 ('e' followed by combining acute accent) +// look the same on the page and are treated by some file systems +// as the same path. If we do allow Unicode marks in paths, there +// must be some kind of normalization to allow only one canonical +// encoding of any character used in an import path. package module // IMPORTANT NOTE @@ -24,22 +102,91 @@ import ( "unicode" "unicode/utf8" - "golang.org/x/tools/internal/semver" + "golang.org/x/mod/semver" + errors "golang.org/x/xerrors" ) -// A Version is defined by a module path and version pair. +// A Version (for clients, a module.Version) is defined by a module path and version pair. +// These are stored in their plain (unescaped) form. type Version struct { + // Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2". Path string // Version is usually a semantic version in canonical form. - // There are two exceptions to this general rule. + // There are three exceptions to this general rule. // First, the top-level target of a build has no specific version // and uses Version = "". // Second, during MVS calculations the version "none" is used // to represent the decision to take no version of a given module. + // Third, filesystem paths found in "replace" directives are + // represented by a path with an empty version. Version string `json:",omitempty"` } +// String returns the module version syntax Path@Version. +func (m Version) String() string { + return m.Path + "@" + m.Version +} + +// A ModuleError indicates an error specific to a module. +type ModuleError struct { + Path string + Version string + Err error +} + +// VersionError returns a ModuleError derived from a Version and error, +// or err itself if it is already such an error. +func VersionError(v Version, err error) error { + var mErr *ModuleError + if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version { + return err + } + return &ModuleError{ + Path: v.Path, + Version: v.Version, + Err: err, + } +} + +func (e *ModuleError) Error() string { + if v, ok := e.Err.(*InvalidVersionError); ok { + return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err) + } + if e.Version != "" { + return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err) + } + return fmt.Sprintf("module %s: %v", e.Path, e.Err) +} + +func (e *ModuleError) Unwrap() error { return e.Err } + +// An InvalidVersionError indicates an error specific to a version, with the +// module path unknown or specified externally. +// +// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError +// must not wrap a ModuleError. +type InvalidVersionError struct { + Version string + Pseudo bool + Err error +} + +// noun returns either "version" or "pseudo-version", depending on whether +// e.Version is a pseudo-version. 
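To see how the error types above compose (module path and error text are made up for illustration):

    package main

    import (
        "fmt"

        "golang.org/x/mod/module"
        errors "golang.org/x/xerrors"
    )

    func main() {
        v := module.Version{Path: "example.com/m", Version: "v1.2.3"}
        fmt.Println(v) // example.com/m@v1.2.3

        err := module.VersionError(v, errors.New("not found"))
        fmt.Println(err) // example.com/m@v1.2.3: not found

        var mErr *module.ModuleError
        if errors.As(err, &mErr) {
            fmt.Println("failed module:", mErr.Path)
        }
    }
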
+func (e *InvalidVersionError) noun() string { + if e.Pseudo { + return "pseudo-version" + } + return "version" +} + +func (e *InvalidVersionError) Error() string { + return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err) +} + +func (e *InvalidVersionError) Unwrap() error { return e.Err } + // Check checks that a given module path, version pair is valid. // In addition to the path being a valid module path // and the version being a valid semantic version, @@ -51,17 +198,14 @@ func Check(path, version string) error { return err } if !semver.IsValid(version) { - return fmt.Errorf("malformed semantic version %v", version) + return &ModuleError{ + Path: path, + Err: &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")}, + } } _, pathMajor, _ := SplitPathVersion(path) - if !MatchPathMajor(version, pathMajor) { - if pathMajor == "" { - pathMajor = "v0 or v1" - } - if pathMajor[0] == '.' { // .v1 - pathMajor = pathMajor[1:] - } - return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor) + if err := CheckPathMajor(version, pathMajor); err != nil { + return &ModuleError{Path: path, Err: err} } return nil } @@ -79,7 +223,7 @@ func firstPathOK(r rune) bool { // Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. // This matches what "go get" has historically recognized in import paths. // TODO(rsc): We would like to allow Unicode letters, but that requires additional -// care in the safe encoding (see note below). +// care in the safe encoding (see "escaped paths" above). func pathOK(r rune) bool { if r < utf8.RuneSelf { return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' || @@ -94,7 +238,7 @@ func pathOK(r rune) bool { // For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. // If we expand the set of allowed characters here, we have to // work harder at detecting potential case-folding and normalization collisions. -// See note about "safe encoding" below. +// See note about "escaped paths" above. func fileNameOK(r rune) bool { if r < utf8.RuneSelf { // Entire set of ASCII punctuation, from which we remove characters: @@ -120,6 +264,17 @@ func fileNameOK(r rune) bool { } // CheckPath checks that a module path is valid. +// A valid module path is a valid import path, as checked by CheckImportPath, +// with two additional constraints. +// First, the leading path element (up to the first slash, if any), +// by convention a domain name, must contain only lower-case ASCII letters, +// ASCII digits, dots (U+002E), and dashes (U+002D); +// it must contain at least one dot and cannot start with a dash. +// Second, for a final path element of the form /vN, where N looks numeric +// (ASCII digits and dots) must not begin with a leading zero, must not be /v1, +// and must not contain any dots. For paths beginning with "gopkg.in/", +// this second requirement is replaced by a requirement that the path +// follow the gopkg.in server's conventions. func CheckPath(path string) error { if err := checkPath(path, false); err != nil { return fmt.Errorf("malformed module path %q: %v", path, err) @@ -149,6 +304,20 @@ func CheckPath(path string) error { } // CheckImportPath checks that an import path is valid. +// +// A valid import path consists of one or more valid path elements +// separated by slashes (U+002F). (It must not begin with nor end in a slash.) 
+// +// A valid path element is a non-empty string made up of +// ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. +// It must not begin or end with a dot (U+002E), nor contain two dots in a row. +// +// The element prefix up to the first dot must not be a reserved file name +// on Windows, regardless of case (CON, com1, NuL, and so on). +// +// CheckImportPath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. func CheckImportPath(path string) error { if err := checkPath(path, false); err != nil { return fmt.Errorf("malformed import path %q: %v", path, err) @@ -169,8 +338,8 @@ func checkPath(path string, fileName bool) error { if path == "" { return fmt.Errorf("empty string") } - if strings.Contains(path, "..") { - return fmt.Errorf("double dot") + if path[0] == '-' { + return fmt.Errorf("leading dash") } if strings.Contains(path, "//") { return fmt.Errorf("double slash") @@ -226,13 +395,24 @@ func checkElem(elem string, fileName bool) error { } for _, bad := range badWindowsNames { if strings.EqualFold(bad, short) { - return fmt.Errorf("disallowed path element %q", elem) + return fmt.Errorf("%q disallowed as path element component on Windows", short) } } return nil } -// CheckFilePath checks whether a slash-separated file path is valid. +// CheckFilePath checks that a slash-separated file path is valid. +// The definition of a valid file path is the same as the definition +// of a valid import path except that the set of allowed characters is larger: +// all Unicode letters, ASCII digits, the ASCII space character (U+0020), +// and the ASCII punctuation characters +// “!#$%&()+,-.=@[]^_{}~”. +// (The excluded punctuation characters, " * < > ? ` ' | / \ and :, +// have special meanings in certain shells or operating systems.) +// +// CheckFilePath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. func CheckFilePath(path string) error { if err := checkPath(path, true); err != nil { return fmt.Errorf("malformed file path %q: %v", path, err) @@ -271,6 +451,9 @@ var badWindowsNames = []string{ // and version is either empty or "/vN" for N >= 2. // As a special case, gopkg.in paths are recognized directly; // they require ".vN" instead of "/vN", and for all N, not just N >= 2. +// SplitPathVersion returns with ok = false when presented with +// a path whose last path element does not satisfy the constraints +// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2". func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { if strings.HasPrefix(path, "gopkg.in/") { return splitGopkgIn(path) @@ -319,20 +502,65 @@ func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { // MatchPathMajor reports whether the semantic version v // matches the path major version pathMajor. +// +// MatchPathMajor returns true if and only if CheckPathMajor returns nil. func MatchPathMajor(v, pathMajor string) bool { + return CheckPathMajor(v, pathMajor) == nil +} + +// CheckPathMajor returns a non-nil error if the semantic version v +// does not match the path major version pathMajor. +func CheckPathMajor(v, pathMajor string) error { + // TODO(jayconrod): return errors or panic for invalid inputs. 
This function + // (and others) was covered by integration tests for cmd/go, and surrounding + // code protected against invalid inputs like non-canonical versions. if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { pathMajor = strings.TrimSuffix(pathMajor, "-unstable") } if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" { // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1. // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405. - return true + return nil } m := semver.Major(v) if pathMajor == "" { - return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" + if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" { + return nil + } + pathMajor = "v0 or v1" + } else if pathMajor[0] == '/' || pathMajor[0] == '.' { + if m == pathMajor[1:] { + return nil + } + pathMajor = pathMajor[1:] + } + return &InvalidVersionError{ + Version: v, + Err: fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)), } - return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:] +} + +// PathMajorPrefix returns the major-version tag prefix implied by pathMajor. +// An empty PathMajorPrefix allows either v0 or v1. +// +// Note that MatchPathMajor may accept some versions that do not actually begin +// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1' +// pathMajor, even though that pathMajor implies 'v1' tagging. +func PathMajorPrefix(pathMajor string) string { + if pathMajor == "" { + return "" + } + if pathMajor[0] != '/' && pathMajor[0] != '.' { + panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator") + } + if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { + pathMajor = strings.TrimSuffix(pathMajor, "-unstable") + } + m := pathMajor[1:] + if m != semver.Major(m) { + panic("pathMajor suffix " + pathMajor + "passed to PathMajorPrefix is not a valid major version") + } + return m } // CanonicalVersion returns the canonical form of the version string v. @@ -345,7 +573,10 @@ func CanonicalVersion(v string) string { return cv } -// Sort sorts the list by Path, breaking ties by comparing Versions. +// Sort sorts the list by Path, breaking ties by comparing Version fields. +// The Version fields are interpreted as semantic versions (using semver.Compare) +// optionally followed by a tie-breaking suffix introduced by a slash character, +// like in "v0.0.1/go.mod". func Sort(list []Version) { sort.Slice(list, func(i, j int) bool { mi := list[i] @@ -372,93 +603,36 @@ func Sort(list []Version) { }) } -// Safe encodings -// -// Module paths appear as substrings of file system paths -// (in the download cache) and of web server URLs in the proxy protocol. -// In general we cannot rely on file systems to be case-sensitive, -// nor can we rely on web servers, since they read from file systems. -// That is, we cannot rely on the file system to keep rsc.io/QUOTE -// and rsc.io/quote separate. Windows and macOS don't. -// Instead, we must never require two different casings of a file path. -// Because we want the download cache to match the proxy protocol, -// and because we want the proxy protocol to be possible to serve -// from a tree of static files (which might be stored on a case-insensitive -// file system), the proxy protocol must never require two different casings -// of a URL path either. 
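A small usage sketch of the MatchPathMajor/CheckPathMajor relationship described above; the version strings and path-major suffixes are invented examples, not taken from the patch:

    package main

    import (
        "fmt"

        "golang.org/x/mod/module"
    )

    func main() {
        // An empty path-major suffix only accepts v0 or v1 (or +incompatible).
        fmt.Println(module.MatchPathMajor("v1.2.3", ""))    // true
        fmt.Println(module.MatchPathMajor("v2.0.0", ""))    // false
        fmt.Println(module.MatchPathMajor("v2.0.0", "/v2")) // true

        // CheckPathMajor reports the same decision as an error value,
        // e.g. version "v2.0.0" invalid: should be v0 or v1, not v2
        if err := module.CheckPathMajor("v2.0.0", ""); err != nil {
            fmt.Println(err)
        }
    }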
-// -// One possibility would be to make the safe encoding be the lowercase -// hexadecimal encoding of the actual path bytes. This would avoid ever -// needing different casings of a file path, but it would be fairly illegible -// to most programmers when those paths appeared in the file system -// (including in file paths in compiler errors and stack traces) -// in web server logs, and so on. Instead, we want a safe encoding that -// leaves most paths unaltered. -// -// The safe encoding is this: -// replace every uppercase letter with an exclamation mark -// followed by the letter's lowercase equivalent. -// -// For example, -// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. -// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy -// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. -// -// Import paths that avoid upper-case letters are left unchanged. -// Note that because import paths are ASCII-only and avoid various -// problematic punctuation (like : < and >), the safe encoding is also ASCII-only -// and avoids the same problematic punctuation. -// -// Import paths have never allowed exclamation marks, so there is no -// need to define how to encode a literal !. -// -// Although paths are disallowed from using Unicode (see pathOK above), -// the eventual plan is to allow Unicode letters as well, to assume that -// file systems and URLs are Unicode-safe (storing UTF-8), and apply -// the !-for-uppercase convention. Note however that not all runes that -// are different but case-fold equivalent are an upper/lower pair. -// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) -// are considered to case-fold to each other. When we do add Unicode -// letters, we must not assume that upper/lower are the only case-equivalent pairs. -// Perhaps the Kelvin symbol would be disallowed entirely, for example. -// Or perhaps it would encode as "!!k", or perhaps as "(212A)". -// -// Also, it would be nice to allow Unicode marks as well as letters, -// but marks include combining marks, and then we must deal not -// only with case folding but also normalization: both U+00E9 ('é') -// and U+0065 U+0301 ('e' followed by combining acute accent) -// look the same on the page and are treated by some file systems -// as the same path. If we do allow Unicode marks in paths, there -// must be some kind of normalization to allow only one canonical -// encoding of any character used in an import path. - -// EncodePath returns the safe encoding of the given module path. +// EscapePath returns the escaped form of the given module path. // It fails if the module path is invalid. -func EncodePath(path string) (encoding string, err error) { +func EscapePath(path string) (escaped string, err error) { if err := CheckPath(path); err != nil { return "", err } - return encodeString(path) + return escapeString(path) } -// EncodeVersion returns the safe encoding of the given module version. +// EscapeVersion returns the escaped form of the given module version. // Versions are allowed to be in non-semver form but must be valid file names // and not contain exclamation marks. 
-func EncodeVersion(v string) (encoding string, err error) { +func EscapeVersion(v string) (escaped string, err error) { if err := checkElem(v, true); err != nil || strings.Contains(v, "!") { - return "", fmt.Errorf("disallowed version string %q", v) + return "", &InvalidVersionError{ + Version: v, + Err: fmt.Errorf("disallowed version string"), + } } - return encodeString(v) + return escapeString(v) } -func encodeString(s string) (encoding string, err error) { +func escapeString(s string) (escaped string, err error) { haveUpper := false for _, r := range s { if r == '!' || r >= utf8.RuneSelf { // This should be disallowed by CheckPath, but diagnose anyway. - // The correctness of the encoding loop below depends on it. - return "", fmt.Errorf("internal error: inconsistency in EncodePath") + // The correctness of the escaping loop below depends on it. + return "", fmt.Errorf("internal error: inconsistency in EscapePath") } if 'A' <= r && r <= 'Z' { haveUpper = true @@ -480,39 +654,39 @@ func encodeString(s string) (encoding string, err error) { return string(buf), nil } -// DecodePath returns the module path of the given safe encoding. -// It fails if the encoding is invalid or encodes an invalid path. -func DecodePath(encoding string) (path string, err error) { - path, ok := decodeString(encoding) +// UnescapePath returns the module path for the given escaped path. +// It fails if the escaped path is invalid or describes an invalid path. +func UnescapePath(escaped string) (path string, err error) { + path, ok := unescapeString(escaped) if !ok { - return "", fmt.Errorf("invalid module path encoding %q", encoding) + return "", fmt.Errorf("invalid escaped module path %q", escaped) } if err := CheckPath(path); err != nil { - return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err) + return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err) } return path, nil } -// DecodeVersion returns the version string for the given safe encoding. -// It fails if the encoding is invalid or encodes an invalid version. +// UnescapeVersion returns the version string for the given escaped version. +// It fails if the escaped form is invalid or describes an invalid version. // Versions are allowed to be in non-semver form but must be valid file names // and not contain exclamation marks. 
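A quick round-trip sketch of the renamed escaping functions, using the example path from the package comment above (illustrative only, not part of the change):

    package main

    import (
        "fmt"

        "golang.org/x/mod/module"
    )

    func main() {
        esc, err := module.EscapePath("github.com/Azure/azure-sdk-for-go")
        if err != nil {
            panic(err)
        }
        fmt.Println(esc) // github.com/!azure/azure-sdk-for-go

        back, err := module.UnescapePath(esc)
        if err != nil {
            panic(err)
        }
        fmt.Println(back) // github.com/Azure/azure-sdk-for-go
    }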
-func DecodeVersion(encoding string) (v string, err error) { - v, ok := decodeString(encoding) +func UnescapeVersion(escaped string) (v string, err error) { + v, ok := unescapeString(escaped) if !ok { - return "", fmt.Errorf("invalid version encoding %q", encoding) + return "", fmt.Errorf("invalid escaped version %q", escaped) } if err := checkElem(v, true); err != nil { - return "", fmt.Errorf("disallowed version string %q", v) + return "", fmt.Errorf("invalid escaped version %q: %v", v, err) } return v, nil } -func decodeString(encoding string) (string, bool) { +func unescapeString(escaped string) (string, bool) { var buf []byte bang := false - for _, r := range encoding { + for _, r := range escaped { if r >= utf8.RuneSelf { return "", false } diff --git a/vendor/golang.org/x/tools/internal/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go similarity index 99% rename from vendor/golang.org/x/tools/internal/semver/semver.go rename to vendor/golang.org/x/mod/semver/semver.go index 4af7118e55d..2988e3cf9c5 100644 --- a/vendor/golang.org/x/tools/internal/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -107,7 +107,7 @@ func Build(v string) string { } // Compare returns an integer comparing two versions according to -// according to semantic version precedence. +// semantic version precedence. // The result will be 0 if v == w, -1 if v < w, or +1 if v > w. // // An invalid semantic version string is considered less than a valid one. @@ -263,7 +263,7 @@ func parseBuild(v string) (t, rest string, ok bool) { i := 1 start := 1 for i < len(v) { - if !isIdentChar(v[i]) { + if !isIdentChar(v[i]) && v[i] != '.' { return } if v[i] == '.' { diff --git a/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/vendor/golang.org/x/tools/cmd/stringer/stringer.go index e1468693f09..a5cd587e4b0 100644 --- a/vendor/golang.org/x/tools/cmd/stringer/stringer.go +++ b/vendor/golang.org/x/tools/cmd/stringer/stringer.go @@ -610,7 +610,12 @@ func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) { g.Printf("\t\treturn _%s_name_%d\n", typeName, i) continue } - g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1]) + if values[0].value == 0 && !values[0].signed { + // For an unsigned lower bound of 0, "0 <= i" would be redundant. + g.Printf("\tcase i <= %s:\n", &values[len(values)-1]) + } else { + g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1]) + } if values[0].value != 0 { g.Printf("\t\ti -= %s\n", &values[0]) } diff --git a/vendor/golang.org/x/tools/go/analysis/doc.go b/vendor/golang.org/x/tools/go/analysis/doc.go index 1b7b7ed5aef..ea56b724e8b 100644 --- a/vendor/golang.org/x/tools/go/analysis/doc.go +++ b/vendor/golang.org/x/tools/go/analysis/doc.go @@ -1,6 +1,6 @@ /* -The analysis package defines the interface between a modular static +Package analysis defines the interface between a modular static analysis and an analysis driver program. @@ -70,39 +70,6 @@ A driver may use the name, flags, and documentation to provide on-line help that describes the analyses it performs. The doc comment contains a brief one-line summary, optionally followed by paragraphs of explanation. -The vet command, shown below, is an example of a driver that runs -multiple analyzers. It is based on the multichecker package -(see the "Standalone commands" section for details). - - $ go build golang.org/x/tools/go/analysis/cmd/vet - $ ./vet help - vet is a tool for static analysis of Go programs. 
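For reference on the semver change above, a tiny sketch of the Compare contract with invented version strings (build metadata with dotted identifiers is what the parseBuild fix accepts):

    package main

    import (
        "fmt"

        "golang.org/x/mod/semver"
    )

    func main() {
        fmt.Println(semver.Compare("v1.2.3", "v1.2.4"))         // -1
        fmt.Println(semver.Compare("v1.2.3", "v1.2.3+build.7")) // 0: build metadata is ignored
        fmt.Println(semver.Compare("bogus", "v1.0.0"))          // -1: invalid < valid
    }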
- - Usage: vet [-flag] [package] - - Registered analyzers: - - asmdecl report mismatches between assembly files and Go declarations - assign check for useless assignments - atomic check for common mistakes using the sync/atomic package - ... - unusedresult check for unused results of calls to some functions - - $ ./vet help unusedresult - unusedresult: check for unused results of calls to some functions - - Analyzer flags: - - -unusedresult.funcs value - comma-separated list of functions whose results must be used (default Error,String) - -unusedresult.stringmethods value - comma-separated list of names of methods of type func() string whose results must be used - - Some functions like fmt.Errorf return a result and have no side effects, - so it is always a mistake to discard the result. This analyzer reports - calls to certain functions in which the result of the call is ignored. - - The set of functions may be controlled using flags. The Analyzer type has more fields besides those shown above: diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index 3e4b195368b..2087ceec9cf 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -275,9 +275,10 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del // We deleted an entry but now there may be // a blank line-sized hole where the import was. - if line-lastLine > 1 { + if line-lastLine > 1 || !gen.Rparen.IsValid() { // There was a blank line immediately preceding the deleted import, - // so there's no need to close the hole. + // so there's no need to close the hole. The right parenthesis is + // invalid after AddImport to an import statement without parenthesis. // Do nothing. } else if line != fset.File(gen.Rparen).LineCount() { // There was no blank line. Close the hole. diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index ddbdd3f08fc..3084508b5f8 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -90,7 +90,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // The types argument, if non-empty, enables type-based filtering of // events. The function f if is called only for nodes whose type // matches an element of the types slice. -func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (prune bool)) { +func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) { mask := maskOf(types) for i := 0; i < len(in.events); { ev := in.events[i] @@ -114,7 +114,7 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (prun // supplies each call to f an additional argument, the current // traversal stack. The stack's first element is the outermost node, // an *ast.File; its last is the innermost, n. 
-func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (prune bool)) { +func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { mask := maskOf(types) var stack []ast.Node for i := 0; i < len(in.events); { diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go index 9cf186605f6..8dcd8bbb71a 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -344,7 +344,7 @@ func (p *parser) expectKeyword(keyword string) { // PackageId = string_lit . // -func (p *parser) parsePackageId() string { +func (p *parser) parsePackageID() string { id, err := strconv.Unquote(p.expect(scanner.String)) if err != nil { p.error(err) @@ -384,7 +384,7 @@ func (p *parser) parseDotIdent() string { // func (p *parser) parseQualifiedName() (id, name string) { p.expect('@') - id = p.parsePackageId() + id = p.parsePackageID() p.expect('.') // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. if p.tok == '?' { @@ -696,7 +696,7 @@ func (p *parser) parseInterfaceType(parent *types.Package) types.Type { // Complete requires the type's embedded interfaces to be fully defined, // but we do not define any - return types.NewInterface(methods, nil).Complete() + return newInterface(methods, nil).Complete() } // ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . @@ -785,7 +785,7 @@ func (p *parser) parseType(parent *types.Package) types.Type { func (p *parser) parseImportDecl() { p.expectKeyword("import") name := p.parsePackageName() - p.getPkg(p.parsePackageId(), name) + p.getPkg(p.parsePackageID(), name) } // int_lit = [ "+" | "-" ] { "0" ... "9" } . diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 2ac7c02a3b0..fc0b28ecf95 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -6,17 +6,17 @@ package packages import ( "bytes" + "context" "encoding/json" "fmt" "go/types" - "io/ioutil" "log" "os" "os/exec" "path" "path/filepath" "reflect" - "regexp" + "sort" "strconv" "strings" "sync" @@ -24,8 +24,6 @@ import ( "unicode" "golang.org/x/tools/go/internal/packagesdriver" - "golang.org/x/tools/internal/gopathwalk" - "golang.org/x/tools/internal/semver" ) // debug controls verbose logging. @@ -44,16 +42,21 @@ type responseDeduper struct { dr *driverResponse } -// init fills in r with a driverResponse. -func (r *responseDeduper) init(dr *driverResponse) { - r.dr = dr - r.seenRoots = map[string]bool{} - r.seenPackages = map[string]*Package{} +func newDeduper() *responseDeduper { + return &responseDeduper{ + dr: &driverResponse{}, + seenRoots: map[string]bool{}, + seenPackages: map[string]*Package{}, + } +} + +// addAll fills in r with a driverResponse. +func (r *responseDeduper) addAll(dr *driverResponse) { for _, pkg := range dr.Packages { - r.seenPackages[pkg.ID] = pkg + r.addPackage(pkg) } for _, root := range dr.Roots { - r.seenRoots[root] = true + r.addRoot(root) } } @@ -73,25 +76,47 @@ func (r *responseDeduper) addRoot(id string) { r.dr.Roots = append(r.dr.Roots, id) } -// goInfo contains global information from the go tool. 
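To show the renamed "proceed" result in context, a self-contained sketch of Inspector.Nodes where returning false on push prunes a subtree; the source text and printed labels are made up for illustration:

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"

        "golang.org/x/tools/go/ast/inspector"
    )

    func main() {
        fset := token.NewFileSet()
        src := "package p\nfunc a() { _ = func() {} }\nfunc b() {}\n"
        f, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            panic(err)
        }

        in := inspector.New([]*ast.File{f})
        in.Nodes([]ast.Node{(*ast.FuncDecl)(nil), (*ast.FuncLit)(nil)}, func(n ast.Node, push bool) bool {
            if !push {
                return true
            }
            if decl, ok := n.(*ast.FuncDecl); ok {
                fmt.Println("decl:", decl.Name.Name)
                return false // proceed=false: do not descend into the body
            }
            // Never reached here: the only FuncLit sits inside a pruned body.
            fmt.Println("lit at", fset.Position(n.Pos()))
            return true
        })
    }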
-type goInfo struct { - rootDirs map[string]string - env goEnv +type golistState struct { + cfg *Config + ctx context.Context + + envOnce sync.Once + goEnvError error + goEnv map[string]string + + rootsOnce sync.Once + rootDirsError error + rootDirs map[string]string + + // vendorDirs caches the (non)existence of vendor directories. + vendorDirs map[string]bool } -type goEnv struct { - modulesOn bool +// getEnv returns Go environment variables. Only specific variables are +// populated -- computing all of them is slow. +func (state *golistState) getEnv() (map[string]string, error) { + state.envOnce.Do(func() { + var b *bytes.Buffer + b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH") + if state.goEnvError != nil { + return + } + + state.goEnv = make(map[string]string) + decoder := json.NewDecoder(b) + if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil { + return + } + }) + return state.goEnv, state.goEnvError } -func determineEnv(cfg *Config) goEnv { - buf, err := invokeGo(cfg, "env", "GOMOD") +// mustGetEnv is a convenience function that can be used if getEnv has already succeeded. +func (state *golistState) mustGetEnv() map[string]string { + env, err := state.getEnv() if err != nil { - return goEnv{} + panic(fmt.Sprintf("mustGetEnv: %v", err)) } - gomod := bytes.TrimSpace(buf.Bytes()) - - env := goEnv{} - env.modulesOn = len(gomod) > 0 return env } @@ -99,47 +124,38 @@ func determineEnv(cfg *Config) goEnv { // the build system package structure. // See driver for more details. func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { - var sizes types.Sizes + // Make sure that any asynchronous go commands are killed when we return. + parentCtx := cfg.Context + if parentCtx == nil { + parentCtx = context.Background() + } + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + response := newDeduper() + + // Fill in response.Sizes asynchronously if necessary. var sizeserr error var sizeswg sync.WaitGroup if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { sizeswg.Add(1) go func() { - sizes, sizeserr = getSizes(cfg) + var sizes types.Sizes + sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg)) + // types.SizesFor always returns nil or a *types.StdSizes. + response.dr.Sizes, _ = sizes.(*types.StdSizes) sizeswg.Done() }() } - defer sizeswg.Wait() - - // start fetching rootDirs - var info goInfo - var rootDirsReady, envReady = make(chan struct{}), make(chan struct{}) - go func() { - info.rootDirs = determineRootDirs(cfg) - close(rootDirsReady) - }() - go func() { - info.env = determineEnv(cfg) - close(envReady) - }() - getGoInfo := func() *goInfo { - <-rootDirsReady - <-envReady - return &info - } - - // Ensure that we don't leak goroutines: Load is synchronous, so callers will - // not expect it to access the fields of cfg after the call returns. - defer getGoInfo() - // always pass getGoInfo to golistDriver - golistDriver := func(cfg *Config, patterns ...string) (*driverResponse, error) { - return golistDriver(cfg, getGoInfo, patterns...) + state := &golistState{ + cfg: cfg, + ctx: ctx, + vendorDirs: map[string]bool{}, } // Determine files requested in contains patterns var containFiles []string - var packagesNamed []string restPatterns := make([]string, 0, len(patterns)) // Extract file= and other [querytype]= patterns. Report an error if querytype // doesn't exist. 
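The getEnv change above asks the go command for specific variables as JSON instead of parsing plain output. A standalone sketch of the same technique (not the patch's code; variable names are invented):

    package main

    import (
        "encoding/json"
        "fmt"
        "os/exec"
    )

    // goEnv queries only the requested variables and decodes the JSON object
    // that "go env -json" prints into a map.
    func goEnv(vars ...string) (map[string]string, error) {
        args := append([]string{"env", "-json"}, vars...)
        out, err := exec.Command("go", args...).Output()
        if err != nil {
            return nil, err
        }
        env := make(map[string]string)
        if err := json.Unmarshal(out, &env); err != nil {
            return nil, err
        }
        return env, nil
    }

    func main() {
        env, err := goEnv("GOMOD", "GOPATH")
        if err != nil {
            panic(err)
        }
        fmt.Println("module mode:", env["GOMOD"] != "")
        fmt.Println("GOPATH:", env["GOPATH"])
    }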
@@ -155,8 +171,6 @@ extractQueries: containFiles = append(containFiles, value) case "pattern": restPatterns = append(restPatterns, value) - case "iamashamedtousethedisabledqueryname": - packagesNamed = append(packagesNamed, value) case "": // not a reserved query restPatterns = append(restPatterns, pattern) default: @@ -172,52 +186,34 @@ extractQueries: } } - response := &responseDeduper{} - var err error - // See if we have any patterns to pass through to go list. Zero initial // patterns also requires a go list call, since it's the equivalent of // ".". if len(restPatterns) > 0 || len(patterns) == 0 { - dr, err := golistDriver(cfg, restPatterns...) + dr, err := state.createDriverResponse(restPatterns...) if err != nil { return nil, err } - response.init(dr) - } else { - response.init(&driverResponse{}) + response.addAll(dr) } - sizeswg.Wait() - if sizeserr != nil { - return nil, sizeserr - } - // types.SizesFor always returns nil or a *types.StdSizes - response.dr.Sizes, _ = sizes.(*types.StdSizes) - - var containsCandidates []string - if len(containFiles) != 0 { - if err := runContainsQueries(cfg, golistDriver, response, containFiles, getGoInfo); err != nil { - return nil, err - } - } - - if len(packagesNamed) != 0 { - if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil { + if err := state.runContainsQueries(response, containFiles); err != nil { return nil, err } } - modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) + modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) if err != nil { return nil, err } + + var containsCandidates []string if len(containFiles) > 0 { containsCandidates = append(containsCandidates, modifiedPkgs...) containsCandidates = append(containsCandidates, needPkgs...) } - if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs, getGoInfo); err != nil { + if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { return nil, err } // Check candidate packages for containFiles. @@ -246,28 +242,32 @@ extractQueries: } } + sizeswg.Wait() + if sizeserr != nil { + return nil, sizeserr + } return response.dr, nil } -func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string, getGoInfo func() *goInfo) error { +func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { if len(pkgs) == 0 { return nil } - dr, err := driver(cfg, pkgs...) + dr, err := state.createDriverResponse(pkgs...) if err != nil { return err } for _, pkg := range dr.Packages { response.addPackage(pkg) } - _, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) + _, needPkgs, err := state.processGolistOverlay(response) if err != nil { return err } - return addNeededOverlayPackages(cfg, driver, response, needPkgs, getGoInfo) + return state.addNeededOverlayPackages(response, needPkgs) } -func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string, goInfo func() *goInfo) error { +func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. 
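From the caller's side, the "file=" queries handled here look like the following sketch; the file path is hypothetical and the load mode is just one plausible choice:

    package main

    import (
        "fmt"

        "golang.org/x/tools/go/packages"
    )

    func main() {
        cfg := &packages.Config{
            Mode: packages.NeedName | packages.NeedFiles,
        }
        // A "file=" pattern asks the driver for the package containing this file.
        pkgs, err := packages.Load(cfg, "file=/home/me/src/example/foo.go")
        if err != nil {
            panic(err)
        }
        for _, p := range pkgs {
            fmt.Println(p.ID, p.PkgPath)
        }
    }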
fdir := filepath.Dir(query) @@ -277,44 +277,17 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q if err != nil { return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) } - dirResponse, err := driver(cfg, pattern) - if err != nil || (len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1) { - // There was an error loading the package. Try to load the file as an ad-hoc package. - // Usually the error will appear in a returned package, but may not if we're in modules mode - // and the ad-hoc is located outside a module. + dirResponse, err := state.createDriverResponse(pattern) + + // If there was an error loading the package, or the package is returned + // with errors, try to load the file as an ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're + // in module mode and the ad-hoc is located outside a module. + if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + len(dirResponse.Packages[0].Errors) == 1 { var queryErr error - dirResponse, queryErr = driver(cfg, query) - if queryErr != nil { - // Return the original error if the attempt to fall back failed. - return err - } - // If we get nothing back from `go list`, try to make this file into its own ad-hoc package. - if len(dirResponse.Packages) == 0 && queryErr == nil { - dirResponse.Packages = append(dirResponse.Packages, &Package{ - ID: "command-line-arguments", - PkgPath: query, - GoFiles: []string{query}, - CompiledGoFiles: []string{query}, - Imports: make(map[string]*Package), - }) - dirResponse.Roots = append(dirResponse.Roots, "command-line-arguments") - } - // Special case to handle issue #33482: - // If this is a file= query for ad-hoc packages where the file only exists on an overlay, - // and exists outside of a module, add the file in for the package. - if len(dirResponse.Packages) == 1 && (dirResponse.Packages[0].ID == "command-line-arguments" || - filepath.ToSlash(dirResponse.Packages[0].PkgPath) == filepath.ToSlash(query)) { - if len(dirResponse.Packages[0].GoFiles) == 0 { - filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath - // TODO(matloob): check if the file is outside of a root dir? - for path := range cfg.Overlay { - if path == filename { - dirResponse.Packages[0].Errors = nil - dirResponse.Packages[0].GoFiles = []string{path} - dirResponse.Packages[0].CompiledGoFiles = []string{path} - } - } - } + if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { + return err // return the original error } } isRoot := make(map[string]bool, len(dirResponse.Roots)) @@ -342,276 +315,47 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q return nil } -// modCacheRegexp splits a path in a module cache into module, module version, and package. -var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) - -func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { - // calling `go env` isn't free; bail out if there's nothing to do. - if len(queries) == 0 { - return nil - } - // Determine which directories are relevant to scan. - roots, modRoot, err := roots(cfg) - if err != nil { - return err - } - - // Scan the selected directories. Simple matches, from GOPATH/GOROOT - // or the local module, can simply be "go list"ed. Matches from the - // module cache need special treatment. 
- var matchesMu sync.Mutex - var simpleMatches, modCacheMatches []string - add := func(root gopathwalk.Root, dir string) { - // Walk calls this concurrently; protect the result slices. - matchesMu.Lock() - defer matchesMu.Unlock() - - path := dir - if dir != root.Path { - path = dir[len(root.Path)+1:] - } - if pathMatchesQueries(path, queries) { - switch root.Type { - case gopathwalk.RootModuleCache: - modCacheMatches = append(modCacheMatches, path) - case gopathwalk.RootCurrentModule: - // We'd need to read go.mod to find the full - // import path. Relative's easier. - rel, err := filepath.Rel(cfg.Dir, dir) - if err != nil { - // This ought to be impossible, since - // we found dir in the current module. - panic(err) - } - simpleMatches = append(simpleMatches, "./"+rel) - case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT: - simpleMatches = append(simpleMatches, path) - } - } - } - - startWalk := time.Now() - gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug}) - cfg.Logf("%v for walk", time.Since(startWalk)) - - // Weird special case: the top-level package in a module will be in - // whatever directory the user checked the repository out into. It's - // more reasonable for that to not match the package name. So, if there - // are any Go files in the mod root, query it just to be safe. - if modRoot != "" { - rel, err := filepath.Rel(cfg.Dir, modRoot) - if err != nil { - panic(err) // See above. - } - - files, err := ioutil.ReadDir(modRoot) - if err != nil { - panic(err) // See above. - } - - for _, f := range files { - if strings.HasSuffix(f.Name(), ".go") { - simpleMatches = append(simpleMatches, rel) - break - } - } - } - - addResponse := func(r *driverResponse) { - for _, pkg := range r.Packages { - response.addPackage(pkg) - for _, name := range queries { - if pkg.Name == name { - response.addRoot(pkg.ID) - break - } - } - } - } - - if len(simpleMatches) != 0 { - resp, err := driver(cfg, simpleMatches...) - if err != nil { - return err - } - addResponse(resp) - } - - // Module cache matches are tricky. We want to avoid downloading new - // versions of things, so we need to use the ones present in the cache. - // go list doesn't accept version specifiers, so we have to write out a - // temporary module, and do the list in that module. - if len(modCacheMatches) != 0 { - // Collect all the matches, deduplicating by major version - // and preferring the newest. - type modInfo struct { - mod string - major string - } - mods := make(map[modInfo]string) - var imports []string - for _, modPath := range modCacheMatches { - matches := modCacheRegexp.FindStringSubmatch(modPath) - mod, ver := filepath.ToSlash(matches[1]), matches[2] - importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3])) - - major := semver.Major(ver) - if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 { - mods[modInfo{mod, major}] = ver - } - - imports = append(imports, importPath) - } - - // Build the temporary module. - var gomod bytes.Buffer - gomod.WriteString("module modquery\nrequire (\n") - for mod, version := range mods { - gomod.WriteString("\t" + mod.mod + " " + version + "\n") - } - gomod.WriteString(")\n") - - tmpCfg := *cfg - - // We're only trying to look at stuff in the module cache, so - // disable the network. This should speed things up, and has - // prevented errors in at least one case, #28518. - tmpCfg.Env = append([]string{"GOPROXY=off"}, cfg.Env...) 
- - var err error - tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery") - if err != nil { - return err - } - defer os.RemoveAll(tmpCfg.Dir) - - if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil { - return fmt.Errorf("writing go.mod for module cache query: %v", err) - } - - // Run the query, using the import paths calculated from the matches above. - resp, err := driver(&tmpCfg, imports...) - if err != nil { - return fmt.Errorf("querying module cache matches: %v", err) - } - addResponse(resp) - } - - return nil -} - -func getSizes(cfg *Config) (types.Sizes, error) { - return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg)) -} - -// roots selects the appropriate paths to walk based on the passed-in configuration, -// particularly the environment and the presence of a go.mod in cfg.Dir's parents. -func roots(cfg *Config) ([]gopathwalk.Root, string, error) { - stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD") +// adhocPackage attempts to load or construct an ad-hoc package for a given +// query, if the original call to the driver produced inadequate results. +func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { + response, err := state.createDriverResponse(query) if err != nil { - return nil, "", err - } - - fields := strings.Split(stdout.String(), "\n") - if len(fields) != 4 || len(fields[3]) != 0 { - return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String()) - } - goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2] - var modDir string - if gomod != "" { - modDir = filepath.Dir(gomod) + return nil, err } - - var roots []gopathwalk.Root - // Always add GOROOT. - roots = append(roots, gopathwalk.Root{ - Path: filepath.Join(goroot, "/src"), - Type: gopathwalk.RootGOROOT, - }) - // If modules are enabled, scan the module dir. - if modDir != "" { - roots = append(roots, gopathwalk.Root{ - Path: modDir, - Type: gopathwalk.RootCurrentModule, + // If we get nothing back from `go list`, + // try to make this file into its own ad-hoc package. + // TODO(rstambler): Should this check against the original response? + if len(response.Packages) == 0 { + response.Packages = append(response.Packages, &Package{ + ID: "command-line-arguments", + PkgPath: query, + GoFiles: []string{query}, + CompiledGoFiles: []string{query}, + Imports: make(map[string]*Package), }) - } - // Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode. - for _, p := range gopath { - if modDir != "" { - roots = append(roots, gopathwalk.Root{ - Path: filepath.Join(p, "/pkg/mod"), - Type: gopathwalk.RootModuleCache, - }) - } else { - roots = append(roots, gopathwalk.Root{ - Path: filepath.Join(p, "/src"), - Type: gopathwalk.RootGOPATH, - }) - } - } - - return roots, modDir, nil -} - -// These functions were copied from goimports. See further documentation there. - -// pathMatchesQueries is adapted from pkgIsCandidate. -// TODO: is it reasonable to do Contains here, rather than an exact match on a path component? 
-func pathMatchesQueries(path string, queries []string) bool { - lastTwo := lastTwoComponents(path) - for _, query := range queries { - if strings.Contains(lastTwo, query) { - return true - } - if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) { - lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) - if strings.Contains(lastTwo, query) { - return true - } - } - } - return false -} - -// lastTwoComponents returns at most the last two path components -// of v, using either / or \ as the path separator. -func lastTwoComponents(v string) string { - nslash := 0 - for i := len(v) - 1; i >= 0; i-- { - if v[i] == '/' || v[i] == '\\' { - nslash++ - if nslash == 2 { - return v[i:] + response.Roots = append(response.Roots, "command-line-arguments") + } + // Handle special cases. + if len(response.Packages) == 1 { + // golang/go#33482: If this is a file= query for ad-hoc packages where + // the file only exists on an overlay, and exists outside of a module, + // add the file to the package and remove the errors. + if response.Packages[0].ID == "command-line-arguments" || + filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) { + if len(response.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? + for path := range state.cfg.Overlay { + if path == filename { + response.Packages[0].Errors = nil + response.Packages[0].GoFiles = []string{path} + response.Packages[0].CompiledGoFiles = []string{path} + } + } } } } - return v -} - -func hasHyphenOrUpperASCII(s string) bool { - for i := 0; i < len(s); i++ { - b := s[i] - if b == '-' || ('A' <= b && b <= 'Z') { - return true - } - } - return false -} - -func lowerASCIIAndRemoveHyphen(s string) (ret string) { - buf := make([]byte, 0, len(s)) - for i := 0; i < len(s); i++ { - b := s[i] - switch { - case b == '-': - continue - case 'A' <= b && b <= 'Z': - buf = append(buf, b+('a'-'A')) - default: - buf = append(buf, b) - } - } - return string(buf) + return response, nil } // Fields must match go list; @@ -656,10 +400,9 @@ func otherFiles(p *jsonPackage) [][]string { return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} } -// golistDriver uses the "go list" command to expand the pattern -// words and return metadata for the specified packages. dir may be -// "" and env may be nil, as per os/exec.Command. -func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driverResponse, error) { +// createDriverResponse uses the "go list" command to expand the pattern +// words and return a response for the specified packages. +func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -673,11 +416,13 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv // Run "go list" for complete // information on the specified packages. - buf, err := invokeGo(cfg, golistargs(cfg, words)...) + buf, err := state.invokeGo("list", golistargs(state.cfg, words)...) if err != nil { return nil, err } seen := make(map[string]*jsonPackage) + pkgs := make(map[string]*Package) + additionalErrors := make(map[string][]Error) // Decode the JSON and convert it to Package form. 
var response driverResponse for dec := json.NewDecoder(buf); dec.More(); { @@ -708,18 +453,72 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv // contained in a known module or GOPATH entry. This will allow the package to be // properly "reclaimed" when overlays are processed. if filepath.IsAbs(p.ImportPath) && p.Error != nil { - pkgPath, ok := getPkgPath(cfg, p.ImportPath, rootsDirs) + pkgPath, ok, err := state.getPkgPath(p.ImportPath) + if err != nil { + return nil, err + } if ok { p.ImportPath = pkgPath } } if old, found := seen[p.ImportPath]; found { - if !reflect.DeepEqual(p, old) { - return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + // If one version of the package has an error, and the other doesn't, assume + // that this is a case where go list is reporting a fake dependency variant + // of the imported package: When a package tries to invalidly import another + // package, go list emits a variant of the imported package (with the same + // import path, but with an error on it, and the package will have a + // DepError set on it). An example of when this can happen is for imports of + // main packages: main packages can not be imported, but they may be + // separately matched and listed by another pattern. + // See golang.org/issue/36188 for more details. + + // The plan is that eventually, hopefully in Go 1.15, the error will be + // reported on the importing package rather than the duplicate "fake" + // version of the imported package. Once all supported versions of Go + // have the new behavior this logic can be deleted. + // TODO(matloob): delete the workaround logic once all supported versions of + // Go return the errors on the proper package. + + // There should be exactly one version of a package that doesn't have an + // error. + if old.Error == nil && p.Error == nil { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + continue } - // skip the duplicate - continue + + // Determine if this package's error needs to be bubbled up. + // This is a hack, and we expect for go list to eventually set the error + // on the package. + if old.Error != nil { + var errkind string + if strings.Contains(old.Error.Err, "not an importable package") { + errkind = "not an importable package" + } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") { + errkind = "use of internal package not allowed" + } + if errkind != "" { + if len(old.Error.ImportStack) < 2 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack with fewer than two elements`, errkind) + } + importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-2] + additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ + Pos: old.Error.Pos, + Msg: old.Error.Err, + Kind: ListError, + }) + } + } + + // Make sure that if there's a version of the package without an error, + // that's the one reported to the user. + if old.Error == nil { + continue + } + + // This package will replace the old one at the end of the loop. 
} seen[p.ImportPath] = p @@ -729,6 +528,7 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), + forTest: p.ForTest, } // Work around https://golang.org/issue/28749: @@ -817,29 +617,37 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv }) } + pkgs[pkg.ID] = pkg + } + + for id, errs := range additionalErrors { + if p, ok := pkgs[id]; ok { + p.Errors = append(p.Errors, errs...) + } + } + for _, pkg := range pkgs { response.Packages = append(response.Packages, pkg) } + sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID }) return &response, nil } // getPkgPath finds the package path of a directory if it's relative to a root directory. -func getPkgPath(cfg *Config, dir string, goInfo func() *goInfo) (string, bool) { +func (state *golistState) getPkgPath(dir string) (string, bool, error) { absDir, err := filepath.Abs(dir) if err != nil { - cfg.Logf("error getting absolute path of %s: %v", dir, err) - return "", false + return "", false, err } - for rdir, rpath := range goInfo().rootDirs { - absRdir, err := filepath.Abs(rdir) - if err != nil { - cfg.Logf("error getting absolute path of %s: %v", rdir, err) - continue - } + roots, err := state.determineRootDirs() + if err != nil { + return "", false, err + } + + for rdir, rpath := range roots { // Make sure that the directory is in the module, // to avoid creating a path relative to another module. - if !strings.HasPrefix(absDir, absRdir) { - cfg.Logf("%s does not have prefix %s", absDir, absRdir) + if !strings.HasPrefix(absDir, rdir) { continue } // TODO(matloob): This doesn't properly handle symlinks. @@ -854,11 +662,11 @@ func getPkgPath(cfg *Config, dir string, goInfo func() *goInfo) (string, bool) { // Once the file is saved, gopls, or the next invocation of the tool will get the correct // result straight from golist. // TODO(matloob): Implement module tiebreaking? - return path.Join(rpath, filepath.ToSlash(r)), true + return path.Join(rpath, filepath.ToSlash(r)), true, nil } - return filepath.ToSlash(r), true + return filepath.ToSlash(r), true, nil } - return "", false + return "", false, nil } // absJoin absolutizes and flattens the lists of files. @@ -877,8 +685,8 @@ func absJoin(dir string, fileses ...[]string) (res []string) { func golistargs(cfg *Config, words []string) []string { const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo fullargs := []string{ - "list", "-e", "-json", - fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0), + "-e", "-json", + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), fmt.Sprintf("-test=%t", cfg.Tests), fmt.Sprintf("-export=%t", usesExportData(cfg)), fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), @@ -893,10 +701,17 @@ func golistargs(cfg *Config, words []string) []string { } // invokeGo returns the stdout of a go command invocation. -func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { +func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { + cfg := state.cfg + stdout := new(bytes.Buffer) stderr := new(bytes.Buffer) - cmd := exec.CommandContext(cfg.Context, "go", args...) 
+ goArgs := []string{verb} + if verb != "env" { + goArgs = append(goArgs, cfg.BuildFlags...) + } + goArgs = append(goArgs, args...) + cmd := exec.CommandContext(state.ctx, "go", goArgs...) // On darwin the cwd gets resolved to the real path, which breaks anything that // expects the working directory to keep the original path, including the // go command when dealing with modules. @@ -908,7 +723,7 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { cmd.Stdout = stdout cmd.Stderr = stderr defer func(start time.Time) { - cfg.Logf("%s for %v, stderr: <<%s>> stdout: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr, stdout) + cfg.Logf("%s for %v, stderr: <<%s>> stdout: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, goArgs...), stderr, stdout) }(time.Now()) if err := cmd.Run(); err != nil { @@ -948,7 +763,12 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) } if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { - if strings.HasPrefix(strings.TrimLeftFunc(stderr.String()[len("# "):], isPkgPathRune), "\n") { + msg := stderr.String()[len("# "):] + if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") { + return stdout, nil + } + // Treat pkg-config errors as a special case (golang.org/issue/36770). + if strings.HasPrefix(msg, "pkg-config") { return stdout, nil } } diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index a7de62299d6..7974a6c9bb6 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -1,12 +1,13 @@ package packages import ( - "bytes" "encoding/json" "fmt" "go/parser" "go/token" + "os" "path/filepath" + "sort" "strconv" "strings" ) @@ -16,7 +17,7 @@ import ( // sometimes incorrect. // TODO(matloob): Handle unsupported cases, including the following: // - determining the correct package to add given a new import path -func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func() *goInfo) (modifiedPkgs, needPkgs []string, err error) { +func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { havePkgs := make(map[string]string) // importPath -> non-test package ID needPkgsSet := make(map[string]bool) modifiedPkgsSet := make(map[string]bool) @@ -34,7 +35,23 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( // potentially modifying the transitive set of dependencies). var overlayAddsImports bool - for opath, contents := range cfg.Overlay { + // If both a package and its test package are created by the overlay, we + // need the real package first. Process all non-test files before test + // files, and make the whole process deterministic while we're at it. + var overlayFiles []string + for opath := range state.cfg.Overlay { + overlayFiles = append(overlayFiles, opath) + } + sort.Slice(overlayFiles, func(i, j int) bool { + iTest := strings.HasSuffix(overlayFiles[i], "_test.go") + jTest := strings.HasSuffix(overlayFiles[j], "_test.go") + if iTest != jTest { + return !iTest // non-tests are before tests. 
+ } + return overlayFiles[i] < overlayFiles[j] + }) + for _, opath := range overlayFiles { + contents := state.cfg.Overlay[opath] base := filepath.Base(opath) dir := filepath.Dir(opath) var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant @@ -64,14 +81,8 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( testVariantOf = p continue nextPackage } + // We must have already seen the package of which this is a test variant. if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { - // If we've already seen the test variant, - // make sure to label which package it is a test variant of. - if hasTestFiles(pkg) { - testVariantOf = p - continue nextPackage - } - // If we have already seen the package of which this is a test variant. if hasTestFiles(p) { testVariantOf = pkg } @@ -86,7 +97,10 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( if pkg == nil { // Try to find the module or gopath dir the file is contained in. // Then for modules, add the module opath to the beginning. - pkgPath, ok := getPkgPath(cfg, dir, rootDirs) + pkgPath, ok, err := state.getPkgPath(dir) + if err != nil { + return nil, nil, err + } if !ok { break } @@ -114,6 +128,11 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( if isTestFile && !isXTest && testVariantOf != nil { pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) + // Add the package under test and its imports to the test variant. + pkg.forTest = testVariantOf.PkgPath + for k, v := range testVariantOf.Imports { + pkg.Imports[k] = &Package{ID: v.ID} + } } } } @@ -130,42 +149,45 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( continue } for _, imp := range imports { - _, found := pkg.Imports[imp] - if !found { - overlayAddsImports = true - // TODO(matloob): Handle cases when the following block isn't correct. - // These include imports of vendored packages, etc. - id, ok := havePkgs[imp] - if !ok { - id = imp - } - pkg.Imports[imp] = &Package{ID: id} - // Add dependencies to the non-test variant version of this package as wel. - if testVariantOf != nil { - testVariantOf.Imports[imp] = &Package{ID: id} + if _, found := pkg.Imports[imp]; found { + continue + } + overlayAddsImports = true + id, ok := havePkgs[imp] + if !ok { + var err error + id, err = state.resolveImport(dir, imp) + if err != nil { + return nil, nil, err } } + pkg.Imports[imp] = &Package{ID: id} + // Add dependencies to the non-test variant version of this package as well. + if testVariantOf != nil { + testVariantOf.Imports[imp] = &Package{ID: id} + } } - continue } - // toPkgPath tries to guess the package path given the id. - // This isn't always correct -- it's certainly wrong for - // vendored packages' paths. - toPkgPath := func(id string) string { - // TODO(matloob): Handle vendor paths. - i := strings.IndexByte(id, ' ') - if i >= 0 { - return id[:i] + // toPkgPath guesses the package path given the id. + toPkgPath := func(sourceDir, id string) (string, error) { + if i := strings.IndexByte(id, ' '); i >= 0 { + return state.resolveImport(sourceDir, id[:i]) } - return id + return state.resolveImport(sourceDir, id) } - // Do another pass now that new packages have been created to determine the - // set of missing packages. 
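For context, overlays reach this code through Config.Overlay, which maps absolute file paths to unsaved contents; test files are now processed after their non-test counterparts. A hedged usage sketch with made-up paths and contents:

    package main

    import (
        "fmt"

        "golang.org/x/tools/go/packages"
    )

    func main() {
        cfg := &packages.Config{
            Dir:  "/home/me/src/example",
            Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports,
            Overlay: map[string][]byte{
                // Key must be an absolute path; the file need not exist on disk.
                "/home/me/src/example/extra.go": []byte("package example\n\nimport _ \"fmt\"\n"),
            },
        }
        pkgs, err := packages.Load(cfg, ".")
        if err != nil {
            panic(err)
        }
        for _, p := range pkgs {
            fmt.Println(p.PkgPath, len(p.GoFiles), "files")
        }
    }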
+ // Now that new packages have been created, do another pass to determine
+ // the new set of missing packages.
 for _, pkg := range response.dr.Packages {
 for _, imp := range pkg.Imports {
- pkgPath := toPkgPath(imp.ID)
+ if len(pkg.GoFiles) == 0 {
+ return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath)
+ }
+ pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID)
+ if err != nil {
+ return nil, nil, err
+ }
 if _, ok := havePkgs[pkgPath]; !ok {
 needPkgsSet[pkgPath] = true
 }
@@ -185,6 +207,52 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
 return modifiedPkgs, needPkgs, err
 }
 
+// resolveImport finds the ID of a package given its import path.
+// In particular, it will find the right vendored copy when in GOPATH mode.
+func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) {
+ env, err := state.getEnv()
+ if err != nil {
+ return "", err
+ }
+ if env["GOMOD"] != "" {
+ return importPath, nil
+ }
+
+ searchDir := sourceDir
+ for {
+ vendorDir := filepath.Join(searchDir, "vendor")
+ exists, ok := state.vendorDirs[vendorDir]
+ if !ok {
+ info, err := os.Stat(vendorDir)
+ exists = err == nil && info.IsDir()
+ state.vendorDirs[vendorDir] = exists
+ }
+
+ if exists {
+ vendoredPath := filepath.Join(vendorDir, importPath)
+ if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() {
+ // We should probably check for .go files here, but shame on anyone who fools us.
+ path, ok, err := state.getPkgPath(vendoredPath)
+ if err != nil {
+ return "", err
+ }
+ if ok {
+ return path, nil
+ }
+ }
+ }
+
+ // We know we've hit the top of the filesystem when we Dir / and get /,
+ // or C:\ and get C:\, etc.
+ next := filepath.Dir(searchDir)
+ if next == searchDir {
+ break
+ }
+ searchDir = next
+ }
+ return importPath, nil
+}
+
 func hasTestFiles(p *Package) bool {
 for _, f := range p.GoFiles {
 if strings.HasSuffix(f, "_test.go") {
@@ -194,44 +262,59 @@ func hasTestFiles(p *Package) bool {
 return false
 }
 
-// determineRootDirs returns a mapping from directories code can be contained in to the
-// corresponding import path prefixes of those directories.
-// Its result is used to try to determine the import path for a package containing
-// an overlay file.
-func determineRootDirs(cfg *Config) map[string]string {
- // Assume modules first:
- out, err := invokeGo(cfg, "list", "-m", "-json", "all")
+// determineRootDirs returns a mapping from absolute directories that could
+// contain code to their corresponding import path prefixes.
+func (state *golistState) determineRootDirs() (map[string]string, error) {
+ env, err := state.getEnv()
 if err != nil {
- return determineRootDirsGOPATH(cfg)
+ return nil, err
+ }
+ if env["GOMOD"] != "" {
+ state.rootsOnce.Do(func() {
+ state.rootDirs, state.rootDirsError = state.determineRootDirsModules()
+ })
+ } else {
+ state.rootsOnce.Do(func() {
+ state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH()
+ })
+ }
+ return state.rootDirs, state.rootDirsError
+}
+
+func (state *golistState) determineRootDirsModules() (map[string]string, error) {
+ out, err := state.invokeGo("list", "-m", "-json", "all")
+ if err != nil {
+ return nil, err
 }
 m := map[string]string{}
 type jsonMod struct{ Path, Dir string }
 for dec := json.NewDecoder(out); dec.More(); {
 mod := new(jsonMod)
 if err := dec.Decode(mod); err != nil {
- return m // Give up and return an empty map. Package won't be found for overlay.
+ return nil, err } if mod.Dir != "" && mod.Path != "" { // This is a valid module; add it to the map. - m[mod.Dir] = mod.Path + absDir, err := filepath.Abs(mod.Dir) + if err != nil { + return nil, err + } + m[absDir] = mod.Path } } - return m + return m, nil } -func determineRootDirsGOPATH(cfg *Config) map[string]string { +func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { m := map[string]string{} - out, err := invokeGo(cfg, "env", "GOPATH") - if err != nil { - // Could not determine root dir mapping. Everything is best-effort, so just return an empty map. - // When we try to find the import path for a directory, there will be no root-dir match and - // we'll give up. - return m - } - for _, p := range filepath.SplitList(string(bytes.TrimSpace(out.Bytes()))) { - m[filepath.Join(p, "src")] = "" + for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) { + absDir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + m[filepath.Join(absDir, "src")] = "" } - return m + return m, nil } func extractImports(filename string, contents []byte) ([]string, error) { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 12ab1e55b0c..586c714f608 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -23,6 +23,7 @@ import ( "sync" "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/internal/packagesinternal" ) // A LoadMode controls the amount of detail to return when loading. @@ -34,6 +35,9 @@ import ( // Load may return more information than requested. type LoadMode int +// TODO(matloob): When a V2 of go/packages is released, rename NeedExportsFile to +// NeedExportFile to make it consistent with the Package field it's adding. + const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota @@ -51,7 +55,7 @@ const ( // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. NeedDeps - // NeedExportsFile adds ExportsFile. + // NeedExportsFile adds ExportFile. NeedExportsFile // NeedTypes adds Types, Fset, and IllTyped. @@ -160,7 +164,7 @@ type Config struct { Tests bool // Overlay provides a mapping of absolute file paths to file contents. - // If the file with the given path already exists, the parser will use the + // If the file with the given path already exists, the parser will use the // alternative file contents provided by the map. // // Overlays provide incomplete support for when a given file doesn't @@ -292,6 +296,15 @@ type Package struct { // TypesSizes provides the effective size function for types in TypesInfo. TypesSizes types.Sizes + + // forTest is the package under test, if any. + forTest string +} + +func init() { + packagesinternal.GetForTest = func(p interface{}) string { + return p.(*Package).forTest + } } // An Error describes a problem with a package's metadata, syntax, or types. @@ -500,12 +513,23 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { if i, found := rootMap[pkg.ID]; found { rootIndex = i } + + // Overlays can invalidate export data. + // TODO(matloob): make this check fine-grained based on dependencies on overlaid files + exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" + // This package needs type information if the caller requested types and the package is + // either a root, or it's a non-root and the user requested dependencies ... 
+ needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
+ // This package needs source if the call requested source (or types info, which implies source)
+ // and the package is either a root, or it's a non-root and the user requested dependencies...
+ needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
+ // ... or if we need types and the exportData is invalid. We fall back to (incompletely)
+ // typechecking packages from source if they fail to compile.
+ (ld.Mode&NeedTypes|NeedTypesInfo != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe"
 lpkg := &loaderPackage{
 Package: pkg,
- needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0,
- needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0 ||
- len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files
- pkg.ExportFile == "" && pkg.PkgPath != "unsafe",
+ needtypes: needtypes,
+ needsrc: needsrc,
 }
 ld.pkgs[lpkg.ID] = lpkg
 if rootIndex >= 0 {
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
index 7219c8e9ff1..9887f7e7a01 100644
--- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
@@ -14,14 +14,14 @@ import (
 "sync"
 )
 
-// TraverseLink is used as a return value from WalkFuncs to indicate that the
+// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the
 // symlink named in the call may be traversed.
-var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
+var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
 
-// SkipFiles is a used as a return value from WalkFuncs to indicate that the
+// ErrSkipFiles is used as a return value from WalkFuncs to indicate that the
 // callback should not be called for any other files in the current directory.
 // Child directories will still be traversed.
-var SkipFiles = errors.New("fastwalk: skip remaining files in directory")
+var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory")
 
 // Walk is a faster implementation of filepath.Walk.
// @@ -167,7 +167,7 @@ func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { err := w.fn(joined, typ) if typ == os.ModeSymlink { - if err == TraverseLink { + if err == ErrTraverseLink { // Set callbackDone so we don't call it twice for both the // symlink-as-symlink and the symlink-as-directory later: w.enqueue(walkItem{dir: joined, callbackDone: true}) diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go index a906b87595b..b0d6327a9e6 100644 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go @@ -26,7 +26,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e continue } if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { - if err == SkipFiles { + if err == ErrSkipFiles { skipFiles = true continue } diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go index 3369b1a0b2d..ce38fdcf83f 100644 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go @@ -66,7 +66,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e continue } if err := fn(dirName, name, typ); err != nil { - if err == SkipFiles { + if err == ErrSkipFiles { skipFiles = true continue } diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index d0675622893..64309db74c9 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -189,14 +189,14 @@ func (w *walker) walk(path string, typ os.FileMode) error { if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { // Doesn't make sense to have regular files // directly in your $GOPATH/src or $GOROOT/src. - return fastwalk.SkipFiles + return fastwalk.ErrSkipFiles } if !strings.HasSuffix(path, ".go") { return nil } w.add(w.root, dir) - return fastwalk.SkipFiles + return fastwalk.ErrSkipFiles } if typ == os.ModeDir { base := filepath.Base(path) @@ -224,7 +224,7 @@ func (w *walker) walk(path string, typ os.FileMode) error { return nil } if w.shouldTraverse(dir, fi) { - return fastwalk.TraverseLink + return fastwalk.ErrTraverseLink } } return nil diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 818d44383fa..ee01d34b1bd 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -27,7 +27,6 @@ import ( "unicode/utf8" "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/packages" "golang.org/x/tools/internal/gopathwalk" ) @@ -82,7 +81,8 @@ type ImportFix struct { // IdentName is the identifier that this fix will add or remove. IdentName string // FixType is the type of fix this is (AddImport, DeleteImport, SetImportName). - FixType ImportFixType + FixType ImportFixType + Relevance int // see pkg } // An ImportInfo represents a single import statement. @@ -537,7 +537,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv // derive package names from import paths, see if the file is already // complete. We can't add any imports yet, because we don't know // if missing references are actually package vars. 
- p := &pass{fset: fset, f: f, srcDir: srcDir} + p := &pass{fset: fset, f: f, srcDir: srcDir, env: env} if fixes, done := p.load(); done { return fixes, nil } @@ -559,8 +559,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv } // Third pass: get real package names where we had previously used - // the naive algorithm. This is the first step that will use the - // environment, so we provide it here for the first time. + // the naive algorithm. p = &pass{fset: fset, f: f, srcDir: srcDir, env: env} p.loadRealPackageNames = true p.otherFiles = otherFiles @@ -585,62 +584,86 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv return fixes, nil } -// getCandidatePkgs returns the list of pkgs that are accessible from filename, -// optionall filtered to only packages named pkgName. -func getCandidatePkgs(pkgName, filename string, env *ProcessEnv) ([]*pkg, error) { - // TODO(heschi): filter out current package. (Don't forget x_test can import x.) +// Highest relevance, used for the standard library. Chosen arbitrarily to +// match pre-existing gopls code. +const MaxRelevance = 7 - var result []*pkg +// getCandidatePkgs works with the passed callback to find all acceptable packages. +// It deduplicates by import path, and uses a cached stdlib rather than reading +// from disk. +func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filename, filePkg string, env *ProcessEnv) error { + notSelf := func(p *pkg) bool { + return p.packageName != filePkg || p.dir != filepath.Dir(filename) + } // Start off with the standard library. - for importPath := range stdlib { - if pkgName != "" && path.Base(importPath) != pkgName { - continue - } - result = append(result, &pkg{ + for importPath, exports := range stdlib { + p := &pkg{ dir: filepath.Join(env.GOROOT, "src", importPath), importPathShort: importPath, packageName: path.Base(importPath), - relevance: 0, - }) - } - - // Exclude goroot results -- getting them is relatively expensive, not cached, - // and generally redundant with the in-memory version. - exclude := []gopathwalk.RootType{gopathwalk.RootGOROOT} - // Only the go/packages resolver uses the first argument, and nobody uses that resolver. - scannedPkgs, err := env.GetResolver().scan(nil, true, exclude) - if err != nil { - return nil, err + relevance: MaxRelevance, + } + if notSelf(p) && wrappedCallback.packageNameLoaded(p) { + wrappedCallback.exportsLoaded(p, exports) + } } + var mu sync.Mutex dupCheck := map[string]struct{}{} - for _, pkg := range scannedPkgs { - if pkgName != "" && pkg.packageName != pkgName { - continue - } - if !canUse(filename, pkg.dir) { - continue - } - if _, ok := dupCheck[pkg.importPathShort]; ok { - continue - } - dupCheck[pkg.importPathShort] = struct{}{} - result = append(result, pkg) + + scanFilter := &scanCallback{ + rootFound: func(root gopathwalk.Root) bool { + // Exclude goroot results -- getting them is relatively expensive, not cached, + // and generally redundant with the in-memory version. 
+ return root.Type != gopathwalk.RootGOROOT && wrappedCallback.rootFound(root) + }, + dirFound: wrappedCallback.dirFound, + packageNameLoaded: func(pkg *pkg) bool { + mu.Lock() + defer mu.Unlock() + if _, ok := dupCheck[pkg.importPathShort]; ok { + return false + } + dupCheck[pkg.importPathShort] = struct{}{} + return notSelf(pkg) && wrappedCallback.packageNameLoaded(pkg) + }, + exportsLoaded: func(pkg *pkg, exports []string) { + // If we're an x_test, load the package under test's test variant. + if strings.HasSuffix(filePkg, "_test") && pkg.dir == filepath.Dir(filename) { + var err error + _, exports, err = loadExportsFromFiles(ctx, env, pkg.dir, true) + if err != nil { + return + } + } + wrappedCallback.exportsLoaded(pkg, exports) + }, } + return env.GetResolver().scan(ctx, scanFilter) +} - // Sort first by relevance, then by package name, with import path as a tiebreaker. - sort.Slice(result, func(i, j int) bool { - pi, pj := result[i], result[j] - if pi.relevance != pj.relevance { - return pi.relevance < pj.relevance - } - if pi.packageName != pj.packageName { - return pi.packageName < pj.packageName - } - return pi.importPathShort < pj.importPathShort - }) +func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) map[string]int { + result := make(map[string]int) + for _, path := range paths { + result[path] = env.GetResolver().scoreImportPath(ctx, path) + } + return result +} - return result, nil +func PrimeCache(ctx context.Context, env *ProcessEnv) error { + // Fully scan the disk for directories, but don't actually read any Go files. + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + return false + }, + packageNameLoaded: func(pkg *pkg) bool { + return false + }, + } + return getCandidatePkgs(ctx, callback, "", "", env) } func candidateImportName(pkg *pkg) string { @@ -651,23 +674,37 @@ func candidateImportName(pkg *pkg) string { } // getAllCandidates gets all of the candidates to be imported, regardless of if they are needed. -func getAllCandidates(filename string, env *ProcessEnv) ([]ImportFix, error) { - pkgs, err := getCandidatePkgs("", filename, env) - if err != nil { - return nil, err - } - result := make([]ImportFix, 0, len(pkgs)) - for _, pkg := range pkgs { - result = append(result, ImportFix{ - StmtInfo: ImportInfo{ - ImportPath: pkg.importPathShort, - Name: candidateImportName(pkg), - }, - IdentName: pkg.packageName, - FixType: AddImport, - }) +func getAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + if !canUse(filename, pkg.dir) { + return false + } + // Try the assumed package name first, then a simpler path match + // in case of packages named vN, which are not uncommon. 
+ return strings.HasPrefix(ImportPathToAssumedName(pkg.importPathShort), searchPrefix) || + strings.HasPrefix(path.Base(pkg.importPathShort), searchPrefix) + }, + packageNameLoaded: func(pkg *pkg) bool { + if !strings.HasPrefix(pkg.packageName, searchPrefix) { + return false + } + wrapped(ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }) + return false + }, } - return result, nil + return getCandidatePkgs(ctx, callback, filename, filePkg, env) } // A PackageExport is a package and its exports. @@ -676,42 +713,34 @@ type PackageExport struct { Exports []string } -func getPackageExports(completePackage, filename string, env *ProcessEnv) ([]PackageExport, error) { - pkgs, err := getCandidatePkgs(completePackage, filename, env) - if err != nil { - return nil, err - } - - results := make([]PackageExport, 0, len(pkgs)) - for _, pkg := range pkgs { - fix := &ImportFix{ - StmtInfo: ImportInfo{ - ImportPath: pkg.importPathShort, - Name: candidateImportName(pkg), - }, - IdentName: pkg.packageName, - FixType: AddImport, - } - var exports []string - if e, ok := stdlib[pkg.importPathShort]; ok { - exports = e - } else { - exports, err = loadExportsForPackage(context.Background(), env, completePackage, pkg) - if err != nil { - if env.Debug { - env.Logf("while completing %q, error loading exports from %q: %v", completePackage, pkg.importPathShort, err) - } - continue - } - } - sort.Strings(exports) - results = append(results, PackageExport{ - Fix: fix, - Exports: exports, - }) +func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, references{searchPkg: nil}, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + return pkg.packageName == searchPkg + }, + exportsLoaded: func(pkg *pkg, exports []string) { + sort.Strings(exports) + wrapped(PackageExport{ + Fix: &ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }, + Exports: exports, + }) + }, } - - return results, nil + return getCandidatePkgs(ctx, callback, filename, filePkg, env) } // ProcessEnv contains environment variables and settings that affect the use of @@ -720,20 +749,26 @@ type ProcessEnv struct { LocalPrefix string Debug bool + BuildFlags []string + // If non-empty, these will be used instead of the // process-wide values. GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string WorkingDir string - // If true, use go/packages regardless of the environment. - ForceGoPackages bool - // Logf is the default logger for the ProcessEnv. Logf func(format string, args ...interface{}) resolver Resolver } +// CopyConfig copies the env's configuration into a new env. 
+func (e *ProcessEnv) CopyConfig() *ProcessEnv {
+ copy := *e
+ copy.resolver = nil
+ return &copy
+}
+
 func (e *ProcessEnv) env() []string {
 env := os.Environ()
 add := func(k, v string) {
@@ -757,28 +792,15 @@ func (e *ProcessEnv) GetResolver() Resolver {
 if e.resolver != nil {
 return e.resolver
 }
- if e.ForceGoPackages {
- e.resolver = &goPackagesResolver{env: e}
- return e.resolver
- }
-
 out, err := e.invokeGo("env", "GOMOD")
 if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 {
- e.resolver = &gopathResolver{env: e}
+ e.resolver = newGopathResolver(e)
 return e.resolver
 }
- e.resolver = &ModuleResolver{env: e}
+ e.resolver = newModuleResolver(e)
 return e.resolver
 }
 
-func (e *ProcessEnv) newPackagesConfig(mode packages.LoadMode) *packages.Config {
- return &packages.Config{
- Mode: mode,
- Dir: e.WorkingDir,
- Env: e.env(),
- }
-}
-
 func (e *ProcessEnv) buildContext() *build.Context {
 ctx := build.Default
 ctx.GOROOT = e.GOROOT
@@ -801,8 +823,13 @@ func (e *ProcessEnv) buildContext() *build.Context {
 return &ctx
 }
 
-func (e *ProcessEnv) invokeGo(args ...string) (*bytes.Buffer, error) {
- cmd := exec.Command("go", args...)
+func (e *ProcessEnv) invokeGo(verb string, args ...string) (*bytes.Buffer, error) {
+ goArgs := []string{verb}
+ if verb != "env" {
+ goArgs = append(goArgs, e.BuildFlags...)
+ }
+ goArgs = append(goArgs, args...)
+ cmd := exec.Command("go", goArgs...)
 stdout := &bytes.Buffer{}
 stderr := &bytes.Buffer{}
 cmd.Stdout = stdout
@@ -832,6 +859,10 @@ func cmdDebugStr(cmd *exec.Cmd) string {
 
 func addStdlibCandidates(pass *pass, refs references) {
 add := func(pkg string) {
+ // Prevent self-imports.
+ if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.GOROOT, "src", pkg) == pass.srcDir {
+ return
+ }
 exports := copyExports(stdlib[pkg])
 pass.addCandidate(
 &ImportInfo{ImportPath: pkg},
@@ -856,94 +887,65 @@ func addStdlibCandidates(pass *pass, refs references) {
 type Resolver interface {
 // loadPackageNames loads the package names in importPaths.
 loadPackageNames(importPaths []string, srcDir string) (map[string]string, error)
- // scan finds (at least) the packages satisfying refs. If loadNames is true,
- // package names will be set on the results, and dirs whose package name
- // could not be determined will be excluded.
- scan(refs references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error)
+ // scan works with callback to search for packages. See scanCallback for details.
+ scan(ctx context.Context, callback *scanCallback) error
 // loadExports returns the set of exported symbols in the package at dir.
 // loadExports may be called concurrently.
- loadExports(ctx context.Context, pkg *pkg) (string, []string, error)
+ loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error)
+ // scoreImportPath returns the relevance for an import path.
+ scoreImportPath(ctx context.Context, path string) int
 
 ClearForNewScan()
 }
 
-// gopackagesResolver implements resolver for GOPATH and module workspaces using go/packages.
-type goPackagesResolver struct {
- env *ProcessEnv
-}
-
-func (r *goPackagesResolver) ClearForNewScan() {}
-
-func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
- if len(importPaths) == 0 {
- return nil, nil
- }
- cfg := r.env.newPackagesConfig(packages.LoadFiles)
- pkgs, err := packages.Load(cfg, importPaths...)
- if err != nil { - return nil, err - } - names := map[string]string{} - for _, pkg := range pkgs { - names[VendorlessPath(pkg.PkgPath)] = pkg.Name - } - // We may not have found all the packages. Guess the rest. - for _, path := range importPaths { - if _, ok := names[path]; ok { - continue - } - names[path] = ImportPathToAssumedName(path) - } - return names, nil - -} - -func (r *goPackagesResolver) scan(refs references, _ bool, _ []gopathwalk.RootType) ([]*pkg, error) { - var loadQueries []string - for pkgName := range refs { - loadQueries = append(loadQueries, "iamashamedtousethedisabledqueryname="+pkgName) - } - sort.Strings(loadQueries) - cfg := r.env.newPackagesConfig(packages.LoadFiles) - goPackages, err := packages.Load(cfg, loadQueries...) - if err != nil { - return nil, err - } - - var scan []*pkg - for _, goPackage := range goPackages { - scan = append(scan, &pkg{ - dir: filepath.Dir(goPackage.CompiledGoFiles[0]), - importPathShort: VendorlessPath(goPackage.PkgPath), - goPackage: goPackage, - packageName: goPackage.Name, - }) - } - return scan, nil +// A scanCallback controls a call to scan and receives its results. +// In general, minor errors will be silently discarded; a user should not +// expect to receive a full series of calls for everything. +type scanCallback struct { + // rootFound is called before scanning a new root dir. If it returns true, + // the root will be scanned. Returning false will not necessarily prevent + // directories from that root making it to dirFound. + rootFound func(gopathwalk.Root) bool + // dirFound is called when a directory is found that is possibly a Go package. + // pkg will be populated with everything except packageName. + // If it returns true, the package's name will be loaded. + dirFound func(pkg *pkg) bool + // packageNameLoaded is called when a package is found and its name is loaded. + // If it returns true, the package's exports will be loaded. + packageNameLoaded func(pkg *pkg) bool + // exportsLoaded is called when a package's exports have been loaded. + exportsLoaded func(pkg *pkg, exports []string) } -func (r *goPackagesResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) { - if pkg.goPackage == nil { - return "", nil, fmt.Errorf("goPackage not set") - } - var exports []string - fset := token.NewFileSet() - for _, fname := range pkg.goPackage.CompiledGoFiles { - f, err := parser.ParseFile(fset, fname, nil, 0) - if err != nil { - return "", nil, fmt.Errorf("parsing %s: %v", fname, err) - } - for name := range f.Scope.Objects { - if ast.IsExported(name) { - exports = append(exports, name) +func addExternalCandidates(pass *pass, refs references, filename string) error { + var mu sync.Mutex + found := make(map[string][]pkgDistance) + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true // We want everything. + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, refs, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + if _, want := refs[pkg.packageName]; !want { + return false } - } + if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName { + // The candidate is in the same directory and has the + // same package name. Don't try to import ourselves. + return false + } + if !canUse(filename, pkg.dir) { + return false + } + mu.Lock() + defer mu.Unlock() + found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)}) + return false // We'll do our own loading after we sort. 
+ }, } - return pkg.goPackage.Name, exports, nil -} - -func addExternalCandidates(pass *pass, refs references, filename string) error { - dirScan, err := pass.env.GetResolver().scan(refs, false, nil) + err := pass.env.GetResolver().scan(context.Background(), callback) if err != nil { return err } @@ -970,7 +972,7 @@ func addExternalCandidates(pass *pass, refs references, filename string) error { go func(pkgName string, symbols map[string]bool) { defer wg.Done() - found, err := findImport(ctx, pass, dirScan, pkgName, symbols, filename) + found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename) if err != nil { firstErrOnce.Do(func() { @@ -1041,24 +1043,36 @@ func ImportPathToAssumedName(importPath string) string { // gopathResolver implements resolver for GOPATH workspaces. type gopathResolver struct { - env *ProcessEnv - cache *dirInfoCache + env *ProcessEnv + walked bool + cache *dirInfoCache + scanSema chan struct{} // scanSema prevents concurrent scans. } -func (r *gopathResolver) init() { - if r.cache == nil { - r.cache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - } +func newGopathResolver(env *ProcessEnv) *gopathResolver { + r := &gopathResolver{ + env: env, + cache: &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + }, + scanSema: make(chan struct{}, 1), } + r.scanSema <- struct{}{} + return r } func (r *gopathResolver) ClearForNewScan() { - r.cache = nil + <-r.scanSema + r.cache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + r.walked = false + r.scanSema <- struct{}{} } func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { - r.init() names := map[string]string{} for _, path := range importPaths { names[path] = importPathToName(r.env, path, srcDir) @@ -1138,7 +1152,6 @@ func packageDirToName(dir string) (packageName string, err error) { } type pkg struct { - goPackage *packages.Package dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") importPathShort string // vendorless import path ("net/http", "a/b") packageName string // package name loaded from source if requested @@ -1186,8 +1199,7 @@ func distance(basepath, targetpath string) int { return strings.Count(p, string(filepath.Separator)) + 1 } -func (r *gopathResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) { - r.init() +func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error { add := func(root gopathwalk.Root, dir string) { // We assume cached directories have not changed. We can skip them and their // children. @@ -1204,56 +1216,84 @@ func (r *gopathResolver) scan(_ references, loadNames bool, exclude []gopathwalk } r.cache.Store(dir, info) } - roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), exclude) - gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false}) - var result []*pkg - for _, dir := range r.cache.Keys() { - info, ok := r.cache.Load(dir) - if !ok { - continue - } - if loadNames { - var err error - info, err = r.cache.CachePackageName(info) - if err != nil { - continue - } + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. 
+ if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return } p := &pkg{ importPathShort: info.nonCanonicalImportPath, - dir: dir, - relevance: 1, - packageName: info.packageName, + dir: info.dir, + relevance: MaxRelevance - 1, } if info.rootType == gopathwalk.RootGOROOT { - p.relevance = 0 + p.relevance = MaxRelevance + } + + if !callback.dirFound(p) { + return + } + var err error + p.packageName, err = r.cache.CachePackageName(info) + if err != nil { + return + } + + if !callback.packageNameLoaded(p) { + return + } + if _, exports, err := r.loadExports(ctx, p, false); err == nil { + callback.exportsLoaded(p, exports) } - result = append(result, p) } - return result, nil + stop := r.cache.ScanAndListen(ctx, processDir) + defer stop() + // The callback is not necessarily safe to use in the goroutine below. Process roots eagerly. + roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. + scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: + } + defer func() { r.scanSema <- struct{}{} }() + gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false}) + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil +} + +func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) int { + if _, ok := stdlib[path]; ok { + return MaxRelevance + } + return MaxRelevance - 1 } -func filterRoots(roots []gopathwalk.Root, exclude []gopathwalk.RootType) []gopathwalk.Root { +func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) []gopathwalk.Root { var result []gopathwalk.Root -outer: for _, root := range roots { - for _, i := range exclude { - if i == root.Type { - continue outer - } + if !include(root) { + continue } result = append(result, root) } return result } -func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) { - r.init() - if info, ok := r.cache.Load(pkg.dir); ok { +func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { + if info, ok := r.cache.Load(pkg.dir); ok && !includeTest { return r.cache.CacheExports(ctx, r.env, info) } - return loadExportsFromFiles(ctx, r.env, pkg.dir) + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) } // VendorlessPath returns the devendorized version of the import path ipath. @@ -1269,7 +1309,7 @@ func VendorlessPath(ipath string) string { return ipath } -func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (string, []string, error) { +func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) { var exports []string // Look for non-test, buildable .go files which could provide exports. 
@@ -1280,7 +1320,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str var files []os.FileInfo for _, fi := range all { name := fi.Name() - if !strings.HasSuffix(name, ".go") || strings.HasSuffix(name, "_test.go") { + if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) { continue } match, err := env.buildContext().MatchFile(dir, fi.Name()) @@ -1313,6 +1353,10 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str // handled by MatchFile above. continue } + if includeTest && strings.HasSuffix(f.Name.Name, "_test") { + // x_test package. We want internal test files only. + continue + } pkgName = f.Name.Name for name := range f.Scope.Objects { if ast.IsExported(name) { @@ -1331,29 +1375,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str // findImport searches for a package with the given symbols. // If no package is found, findImport returns ("", false, nil) -func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { - pkgDir, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - pkgDir = filepath.Dir(pkgDir) - - // Find candidate packages, looking only at their directory names first. - var candidates []pkgDistance - for _, pkg := range dirScan { - if pkg.dir == pkgDir && pass.f.Name.Name == pkgName { - // The candidate is in the same directory and has the - // same package name. Don't try to import ourselves. - continue - } - if pkgIsCandidate(filename, pkgName, pkg) { - candidates = append(candidates, pkgDistance{ - pkg: pkg, - distance: distance(pkgDir, pkg.dir), - }) - } - } - +func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { // Sort the candidates by their import package length, // assuming that shorter package names are better than long // ones. Note that this sorts by the de-vendored name, so @@ -1366,7 +1388,6 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, } // Collect exports for packages with matching names. - rescv := make([]chan *pkg, len(candidates)) for i := range candidates { rescv[i] = make(chan *pkg, 1) @@ -1401,7 +1422,9 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, if pass.env.Debug { pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) } - exports, err := loadExportsForPackage(ctx, pass.env, pkgName, c.pkg) + // If we're an x_test, load the package under test's test variant. 
+ includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir + _, exports, err := pass.env.GetResolver().loadExports(ctx, c.pkg, includeTest) if err != nil { if pass.env.Debug { pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) @@ -1438,17 +1461,6 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, return nil, nil } -func loadExportsForPackage(ctx context.Context, env *ProcessEnv, expectPkg string, pkg *pkg) ([]string, error) { - pkgName, exports, err := env.GetResolver().loadExports(ctx, pkg) - if err != nil { - return nil, err - } - if expectPkg != pkgName { - return nil, fmt.Errorf("dir %v is package %v, wanted %v", pkg.dir, pkgName, expectPkg) - } - return exports, err -} - // pkgIsCandidate reports whether pkg is a candidate for satisfying the // finding which package pkgIdent in the file named by filename is trying // to refer to. @@ -1461,7 +1473,7 @@ func loadExportsForPackage(ctx context.Context, env *ProcessEnv, expectPkg strin // filename is the file being formatted. // pkgIdent is the package being searched for, like "client" (if // searching for "client.New") -func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool { +func pkgIsCandidate(filename string, refs references, pkg *pkg) bool { // Check "internal" and "vendor" visibility: if !canUse(filename, pkg.dir) { return false @@ -1479,17 +1491,18 @@ func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool { // "bar", which is strongly discouraged // anyway. There's no reason goimports needs // to be slow just to accommodate that. - lastTwo := lastTwoComponents(pkg.importPathShort) - if strings.Contains(lastTwo, pkgIdent) { - return true - } - if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { - lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + for pkgIdent := range refs { + lastTwo := lastTwoComponents(pkg.importPathShort) if strings.Contains(lastTwo, pkgIdent) { return true } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + } } - return false } diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index b5c97549536..2e7a317e55b 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -11,6 +11,7 @@ package imports import ( "bufio" "bytes" + "context" "fmt" "go/ast" "go/build" @@ -115,23 +116,23 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e return formatFile(fileSet, file, src, nil, opt) } -// GetAllCandidates gets all of the standard library candidate packages to import in -// sorted order on import path. -func GetAllCandidates(filename string, opt *Options) (pkgs []ImportFix, err error) { - _, opt, err = initialize(filename, nil, opt) +// GetAllCandidates gets all of the packages starting with prefix that can be +// imported by filename, sorted by import path. 
+func GetAllCandidates(ctx context.Context, callback func(ImportFix), searchPrefix, filename, filePkg string, opt *Options) error { + _, opt, err := initialize(filename, []byte{}, opt) if err != nil { - return nil, err + return err } - return getAllCandidates(filename, opt.Env) + return getAllCandidates(ctx, callback, searchPrefix, filename, filePkg, opt.Env) } // GetPackageExports returns all known packages with name pkg and their exports. -func GetPackageExports(pkg, filename string, opt *Options) (exports []PackageExport, err error) { - _, opt, err = initialize(filename, nil, opt) +func GetPackageExports(ctx context.Context, callback func(PackageExport), searchPkg, filename, filePkg string, opt *Options) error { + _, opt, err := initialize(filename, []byte{}, opt) if err != nil { - return nil, err + return err } - return getPackageExports(pkg, filename, opt.Env) + return getPackageExports(ctx, callback, searchPkg, filename, filePkg, opt.Env) } // initialize sets the values for opt and src. diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 97660723ae2..1980f59de54 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -13,11 +13,10 @@ import ( "sort" "strconv" "strings" - "sync" + "golang.org/x/mod/module" + "golang.org/x/mod/semver" "golang.org/x/tools/internal/gopathwalk" - "golang.org/x/tools/internal/module" - "golang.org/x/tools/internal/semver" ) // ModuleResolver implements resolver for modules using the go command as little @@ -26,11 +25,14 @@ type ModuleResolver struct { env *ProcessEnv moduleCacheDir string dummyVendorMod *ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. + roots []gopathwalk.Root + scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots. + scannedRoots map[gopathwalk.Root]bool - Initialized bool - Main *ModuleJSON - ModsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path... - ModsByDir []*ModuleJSON // ...or Dir. + initialized bool + main *ModuleJSON + modsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path... + modsByDir []*ModuleJSON // ...or Dir. // moduleCacheCache stores information about the module cache. moduleCacheCache *dirInfoCache @@ -47,8 +49,17 @@ type ModuleJSON struct { GoVersion string // go version used in module } +func newModuleResolver(e *ProcessEnv) *ModuleResolver { + r := &ModuleResolver{ + env: e, + scanSema: make(chan struct{}, 1), + } + r.scanSema <- struct{}{} + return r +} + func (r *ModuleResolver) init() error { - if r.Initialized { + if r.initialized { return nil } mainMod, vendorEnabled, err := vendorEnabled(r.env) @@ -59,13 +70,13 @@ func (r *ModuleResolver) init() error { if mainMod != nil && vendorEnabled { // Vendor mode is on, so all the non-Main modules are irrelevant, // and we need to search /vendor for everything. - r.Main = mainMod + r.main = mainMod r.dummyVendorMod = &ModuleJSON{ Path: "", Dir: filepath.Join(mainMod.Dir, "vendor"), } - r.ModsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod} - r.ModsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod} } else { // Vendor mode is off, so run go list -m ... to find everything. 
r.initAllMods() @@ -73,30 +84,64 @@ func (r *ModuleResolver) init() error { r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod") - sort.Slice(r.ModsByModPath, func(i, j int) bool { + sort.Slice(r.modsByModPath, func(i, j int) bool { count := func(x int) int { - return strings.Count(r.ModsByModPath[x].Path, "/") + return strings.Count(r.modsByModPath[x].Path, "/") } return count(j) < count(i) // descending order }) - sort.Slice(r.ModsByDir, func(i, j int) bool { + sort.Slice(r.modsByDir, func(i, j int) bool { count := func(x int) int { - return strings.Count(r.ModsByDir[x].Dir, "/") + return strings.Count(r.modsByDir[x].Dir, "/") } return count(j) < count(i) // descending order }) + r.roots = []gopathwalk.Root{ + {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT}, + } + if r.main != nil { + r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule}) + } + if vendorEnabled { + r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther}) + } else { + addDep := func(mod *ModuleJSON) { + if mod.Replace == nil { + // This is redundant with the cache, but we'll skip it cheaply enough. + r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache}) + } else { + r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther}) + } + } + // Walk dependent modules before scanning the full mod cache, direct deps first. + for _, mod := range r.modsByModPath { + if !mod.Indirect && !mod.Main { + addDep(mod) + } + } + for _, mod := range r.modsByModPath { + if mod.Indirect && !mod.Main { + addDep(mod) + } + } + r.roots = append(r.roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache}) + } + + r.scannedRoots = map[gopathwalk.Root]bool{} if r.moduleCacheCache == nil { r.moduleCacheCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, } } if r.otherCache == nil { r.otherCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, } } - r.Initialized = true + r.initialized = true return nil } @@ -117,27 +162,35 @@ func (r *ModuleResolver) initAllMods() error { // Can't do anything with a module that's not downloaded. continue } - r.ModsByModPath = append(r.ModsByModPath, mod) - r.ModsByDir = append(r.ModsByDir, mod) + r.modsByModPath = append(r.modsByModPath, mod) + r.modsByDir = append(r.modsByDir, mod) if mod.Main { - r.Main = mod + r.main = mod } } return nil } func (r *ModuleResolver) ClearForNewScan() { + <-r.scanSema + r.scannedRoots = map[gopathwalk.Root]bool{} r.otherCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, } + r.scanSema <- struct{}{} } func (r *ModuleResolver) ClearForNewMod() { - env := r.env + <-r.scanSema *r = ModuleResolver{ - env: env, + env: r.env, + moduleCacheCache: r.moduleCacheCache, + otherCache: r.otherCache, + scanSema: r.scanSema, } r.init() + r.scanSema <- struct{}{} } // findPackage returns the module and directory that contains the package at @@ -145,7 +198,7 @@ func (r *ModuleResolver) ClearForNewMod() { func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) { // This can't find packages in the stdlib, but that's harmless for all // the existing code paths. 
- for _, m := range r.ModsByModPath { + for _, m := range r.modsByModPath { if !strings.HasPrefix(importPath, m.Path) { continue } @@ -212,7 +265,7 @@ func (r *ModuleResolver) cacheKeys() []string { } // cachePackageName caches the package name for a dir already in the cache. -func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) { +func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) { if info.rootType == gopathwalk.RootModuleCache { return r.moduleCacheCache.CachePackageName(info) } @@ -239,7 +292,7 @@ func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON { // - in /vendor/ in -mod=vendor mode. // - nested module? Dunno. // Rumor has it that replace targets cannot contain other replace targets. - for _, m := range r.ModsByDir { + for _, m := range r.modsByDir { if !strings.HasPrefix(dir, m.Dir) { continue } @@ -334,41 +387,49 @@ func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) ( return names, nil } -func (r *ModuleResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) { +func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error { if err := r.init(); err != nil { - return nil, err + return err } - // Walk GOROOT, GOPATH/pkg/mod, and the main module. - roots := []gopathwalk.Root{ - {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT}, - } - if r.Main != nil { - roots = append(roots, gopathwalk.Root{r.Main.Dir, gopathwalk.RootCurrentModule}) - } - if r.dummyVendorMod != nil { - roots = append(roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther}) - } else { - roots = append(roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache}) - // Walk replace targets, just in case they're not in any of the above. - for _, mod := range r.ModsByModPath { - if mod.Replace != nil { - roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther}) - } + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return + } + pkg, err := r.canonicalize(info) + if err != nil { + return + } + + if !callback.dirFound(pkg) { + return + } + pkg.packageName, err = r.cachePackageName(info) + if err != nil { + return } - } - roots = filterRoots(roots, exclude) + if !callback.packageNameLoaded(pkg) { + return + } + _, exports, err := r.loadExports(ctx, pkg, false) + if err != nil { + return + } + callback.exportsLoaded(pkg, exports) + } - var result []*pkg - var mu sync.Mutex + // Start processing everything in the cache, and listen for the new stuff + // we discover in the walk below. + stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir) + defer stop1() + stop2 := r.otherCache.ScanAndListen(ctx, processDir) + defer stop2() - // We assume cached directories have not changed. We can skip them and their - // children. + // We assume cached directories are fully cached, including all their + // children, and have not changed. We can skip them. skip := func(root gopathwalk.Root, dir string) bool { - mu.Lock() - defer mu.Unlock() - info, ok := r.cacheLoad(dir) if !ok { return false @@ -380,44 +441,64 @@ func (r *ModuleResolver) scan(_ references, loadNames bool, exclude []gopathwalk return packageScanned } - // Add anything new to the cache. We'll process everything in it below. 
+ // Add anything new to the cache, and process it if we're still listening. add := func(root gopathwalk.Root, dir string) { - mu.Lock() - defer mu.Unlock() - r.cacheStore(r.scanDirForPackage(root, dir)) } - gopathwalk.WalkSkip(roots, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true}) - - // Everything we already had, and everything new, is now in the cache. - for _, dir := range r.cacheKeys() { - info, ok := r.cacheLoad(dir) - if !ok { - continue - } - - // Skip this directory if we were not able to get the package information successfully. - if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { - continue - } + // r.roots and the callback are not necessarily safe to use in the + // goroutine below. Process them eagerly. + roots := filterRoots(r.roots, callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. + scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: + } + defer func() { r.scanSema <- struct{}{} }() + // We have the lock on r.scannedRoots, and no other scans can run. + for _, root := range roots { + if ctx.Err() != nil { + return + } - // If we want package names, make sure the cache has them. - if loadNames { - var err error - if info, err = r.cachePackageName(info); err != nil { + if r.scannedRoots[root] { continue } + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true}) + r.scannedRoots[root] = true } + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil +} - res, err := r.canonicalize(info) - if err != nil { - continue - } - result = append(result, res) +func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int { + if _, ok := stdlib[path]; ok { + return MaxRelevance } + mod, _ := r.findPackage(path) + return modRelevance(mod) +} - return result, nil +func modRelevance(mod *ModuleJSON) int { + switch { + case mod == nil: // out of scope + return MaxRelevance - 4 + case mod.Indirect: + return MaxRelevance - 3 + case !mod.Main: + return MaxRelevance - 2 + default: + return MaxRelevance - 1 // main module ties with stdlib + } } // canonicalize gets the result of canonicalizing the packages using the results @@ -429,19 +510,14 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { importPathShort: info.nonCanonicalImportPath, dir: info.dir, packageName: path.Base(info.nonCanonicalImportPath), - relevance: 0, + relevance: MaxRelevance, }, nil } importPath := info.nonCanonicalImportPath - relevance := 3 + mod := r.findModuleByDir(info.dir) // Check if the directory is underneath a module that's in scope. - if mod := r.findModuleByDir(info.dir); mod != nil { - if mod.Indirect { - relevance = 2 - } else { - relevance = 1 - } + if mod != nil { // It is. If dir is the target of a replace directive, // our guessed import path is wrong. Use the real one. if mod.Dir == info.dir { @@ -450,15 +526,16 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { dirInMod := info.dir[len(mod.Dir)+len("/"):] importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod)) } - } else if info.needsReplace { + } else if !strings.HasPrefix(importPath, info.moduleName) { + // The module's name doesn't match the package's import path. It + // probably needs a replace directive we don't have. 
return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir) } res := &pkg{ importPathShort: importPath, dir: info.dir, - packageName: info.packageName, // may not be populated if the caller didn't ask for it - relevance: relevance, + relevance: modRelevance(mod), } // We may have discovered a package that has a different version // in scope already. Canonicalize to that one if possible. @@ -468,14 +545,14 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { return res, nil } -func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) { +func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { if err := r.init(); err != nil { return "", nil, err } - if info, ok := r.cacheLoad(pkg.dir); ok { + if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest { return r.cacheExports(ctx, r.env, info) } - return loadExportsFromFiles(ctx, r.env, pkg.dir) + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) } func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo { @@ -493,7 +570,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir } switch root.Type { case gopathwalk.RootCurrentModule: - importPath = path.Join(r.Main.Path, filepath.ToSlash(subdir)) + importPath = path.Join(r.main.Path, filepath.ToSlash(subdir)) case gopathwalk.RootModuleCache: matches := modCacheRegexp.FindStringSubmatch(subdir) if len(matches) == 0 { @@ -502,7 +579,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir err: fmt.Errorf("invalid module cache path: %v", subdir), } } - modPath, err := module.DecodePath(filepath.ToSlash(matches[1])) + modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) if err != nil { if r.env.Debug { r.env.Logf("decoding module cache path %q: %v", subdir, err) @@ -521,7 +598,6 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir dir: dir, rootType: root.Type, nonCanonicalImportPath: importPath, - needsReplace: false, moduleDir: modDir, moduleName: modName, } @@ -529,14 +605,6 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir // stdlib packages are always in scope, despite the confusing go.mod return result } - // Check that this package is not obviously impossible to import. - if !strings.HasPrefix(importPath, modName) { - // The module's declared path does not match - // its expected path. It probably needs a - // replace directive we don't have. - result.needsReplace = true - } - return result } diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go index f6b070a3f6e..5b4f03accdd 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod_cache.go +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -49,10 +49,6 @@ type directoryPackageInfo struct { // nonCanonicalImportPath is the package's expected import path. It may // not actually be importable at that path. nonCanonicalImportPath string - // needsReplace is true if the nonCanonicalImportPath does not match the - // module's declared path, making it impossible to import without a - // replace directive. - needsReplace bool // Module-related information. moduleDir string // The directory that is the module root of this dir. 
@@ -97,15 +93,86 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (
 type dirInfoCache struct {
 	mu sync.Mutex
 	// dirs stores information about packages in directories, keyed by absolute path.
-	dirs map[string]*directoryPackageInfo
+	dirs      map[string]*directoryPackageInfo
+	listeners map[*int]cacheListener
+}
+
+type cacheListener func(directoryPackageInfo)
+
+// ScanAndListen calls listener on all the items in the cache, and on anything
+// newly added. The returned stop function waits for all in-flight callbacks to
+// finish and blocks new ones.
+func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
+	ctx, cancel := context.WithCancel(ctx)
+
+	// Flushing out all the callbacks is tricky without knowing how many there
+	// are going to be. Setting an arbitrary limit makes it much easier.
+	const maxInFlight = 10
+	sema := make(chan struct{}, maxInFlight)
+	for i := 0; i < maxInFlight; i++ {
+		sema <- struct{}{}
+	}
+
+	cookie := new(int) // A unique ID we can use for the listener.
+
+	// We can't hold mu while calling the listener.
+	d.mu.Lock()
+	var keys []string
+	for key := range d.dirs {
+		keys = append(keys, key)
+	}
+	d.listeners[cookie] = func(info directoryPackageInfo) {
+		select {
+		case <-ctx.Done():
+			return
+		case <-sema:
+		}
+		listener(info)
+		sema <- struct{}{}
+	}
+	d.mu.Unlock()
+
+	stop := func() {
+		cancel()
+		d.mu.Lock()
+		delete(d.listeners, cookie)
+		d.mu.Unlock()
+		for i := 0; i < maxInFlight; i++ {
+			<-sema
+		}
+	}
+
+	// Process the pre-existing keys.
+	for _, k := range keys {
+		select {
+		case <-ctx.Done():
+			return stop
+		default:
+		}
+		if v, ok := d.Load(k); ok {
+			listener(v)
+		}
+	}
+
+	return stop
 }
 
 // Store stores the package info for dir.
 func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
 	d.mu.Lock()
-	defer d.mu.Unlock()
-	stored := info // defensive copy
-	d.dirs[dir] = &stored
+	_, old := d.dirs[dir]
+	d.dirs[dir] = &info
+	var listeners []cacheListener
+	for _, l := range d.listeners {
+		listeners = append(listeners, l)
+	}
+	d.mu.Unlock()
+
+	if !old {
+		for _, l := range listeners {
+			l(info)
+		}
+	}
 }
 
 // Load returns a copy of the directoryPackageInfo for absolute directory dir.
@@ -129,17 +196,17 @@ func (d *dirInfoCache) Keys() (keys []string) {
 	return keys
 }
 
-func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) {
+func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
 	if loaded, err := info.reachedStatus(nameLoaded); loaded {
-		return info, err
+		return info.packageName, err
 	}
 	if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
-		return info, fmt.Errorf("cannot read package name, scan error: %v", err)
+		return "", fmt.Errorf("cannot read package name, scan error: %v", err)
 	}
 	info.packageName, info.err = packageDirToName(info.dir)
 	info.status = nameLoaded
 	d.Store(info.dir, info)
-	return info, info.err
+	return info.packageName, info.err
 }
 
 func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
@@ -149,8 +216,8 @@ func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info d
 	if reached, err := info.reachedStatus(nameLoaded); reached && err != nil {
 		return "", nil, err
 	}
-	info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir)
-	if info.err == context.Canceled {
+	info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false)
+	if info.err == context.Canceled || info.err == context.DeadlineExceeded {
 		return info.packageName, info.exports, info.err
 	}
 	// The cache structure wants things to proceed linearly. We can skip a
diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
new file mode 100644
index 00000000000..0c0dbb6a9da
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -0,0 +1,4 @@
+// Package packagesinternal exposes internal-only fields from go/packages.
+package packagesinternal
+
+var GetForTest = func(p interface{}) string { return "" }
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 94f523d32fb..f0695bcffcf 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -419,6 +419,11 @@ github.com/elastic/ecs/code/go/ecs
 github.com/elastic/elastic-agent-client/v7/pkg/client
 github.com/elastic/elastic-agent-client/v7/pkg/proto
 github.com/elastic/elastic-agent-client/v7/pkg/utils
+# github.com/elastic/go-concert v0.0.2
+github.com/elastic/go-concert
+github.com/elastic/go-concert/atomic
+github.com/elastic/go-concert/chorus
+github.com/elastic/go-concert/unison
 # github.com/elastic/go-libaudit/v2 v2.0.0-20200515221334-92371bef3fb8
 github.com/elastic/go-libaudit/v2
 github.com/elastic/go-libaudit/v2/aucoalesce
@@ -920,9 +925,12 @@ golang.org/x/crypto/ssh/terminal
 # golang.org/x/exp v0.0.0-20191227195350-da58074b4299
 golang.org/x/exp/apidiff
 golang.org/x/exp/cmd/apidiff
-# golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f
+# golang.org/x/lint v0.0.0-20200130185559-910be7a94367
 golang.org/x/lint
 golang.org/x/lint/golint
+# golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee
+golang.org/x/mod/module
+golang.org/x/mod/semver
 # golang.org/x/net v0.0.0-20200202094626-16171245cfb2
 golang.org/x/net/bpf
 golang.org/x/net/context
@@ -990,7 +998,7 @@ golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
 # golang.org/x/time v0.0.0-20191024005414-555d28b269f0
 golang.org/x/time/rate
-# golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4
+# golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2
 golang.org/x/tools/cmd/goimports
 golang.org/x/tools/cmd/stringer
 golang.org/x/tools/go/analysis
@@ -1008,8 +1016,7 @@ golang.org/x/tools/go/vcs
 golang.org/x/tools/internal/fastwalk
 golang.org/x/tools/internal/gopathwalk
 golang.org/x/tools/internal/imports
-golang.org/x/tools/internal/module
-golang.org/x/tools/internal/semver
+golang.org/x/tools/internal/packagesinternal
 # golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
 golang.org/x/xerrors
 golang.org/x/xerrors/internal
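
Reviewer note (not part of the patch): the least obvious piece of the vendored x/tools update is dirInfoCache.ScanAndListen above, which bounds concurrent listener callbacks with a token channel and lets the returned stop function wait for in-flight callbacks by draining every token. Below is a minimal, self-contained sketch of that idiom in isolation. The names boundedNotifier, Notify, Stop, and maxInFlight are illustrative only and do not appear in the vendored code.

package main

import (
	"context"
	"fmt"
	"time"
)

// boundedNotifier limits how many callbacks may run at once and can wait for
// all in-flight callbacks on shutdown. (Illustrative sketch; not in the patch.)
type boundedNotifier struct {
	ctx    context.Context
	cancel context.CancelFunc
	sema   chan struct{} // holds one token per allowed in-flight callback
}

func newBoundedNotifier(parent context.Context, maxInFlight int) *boundedNotifier {
	ctx, cancel := context.WithCancel(parent)
	sema := make(chan struct{}, maxInFlight)
	for i := 0; i < maxInFlight; i++ {
		sema <- struct{}{} // pre-fill the channel with tokens
	}
	return &boundedNotifier{ctx: ctx, cancel: cancel, sema: sema}
}

// Notify runs fn only if the notifier has not been stopped and a token is free.
func (n *boundedNotifier) Notify(fn func()) {
	select {
	case <-n.ctx.Done():
		return // stopped: drop the notification
	case <-n.sema: // claim a token
	}
	fn()
	n.sema <- struct{}{} // return the token
}

// Stop blocks new notifications, then waits for in-flight ones to finish by
// taking every token back out of the channel.
func (n *boundedNotifier) Stop() {
	n.cancel()
	for i := 0; i < cap(n.sema); i++ {
		<-n.sema
	}
}

func main() {
	n := newBoundedNotifier(context.Background(), 10)
	for i := 0; i < 5; i++ {
		i := i
		go n.Notify(func() {
			time.Sleep(10 * time.Millisecond)
			fmt.Println("callback", i, "done")
		})
	}
	time.Sleep(50 * time.Millisecond)
	n.Stop() // returns only after every running callback has finished
	fmt.Println("all callbacks drained")
}

Draining cap(sema) tokens in Stop is what guarantees no callback is still running when Stop returns, which is the same property the stop function returned by ScanAndListen gives its callers.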