diff --git a/CHANGELOG.md b/CHANGELOG.md
index e94c68112fd..89956e6c65d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,8 +1,16 @@
# Changelog
-## 1.22.0-dev
+## 1.23.0-dev
+* Feature - Add support for ECS Secrets integrating with AWS Secrets Manager [#1713](https://github.com/aws/amazon-ecs-agent/pull/1713)
+* Enhancement - Add availability zone to task metadata endpoint and ECS metadata file
* Bug - Fixed a bug where the agent could register a container instance back to back and get
  assigned two container instance ARNs [#1711](https://github.com/aws/amazon-ecs-agent/pull/1711)
+* Bug - Fixed a bug where propagated `aws:` tags were passed through the RegisterContainerInstance API call [#1706](https://github.com/aws/amazon-ecs-agent/pull/1706)
+
+## 1.22.0
* Feature - Add support for ECS Secrets integrating with AWS Systems Manager Parameter Store
* Feature - Support for `--pid`, `--ipc` Docker run flags. [#1584](https://github.com/aws/amazon-ecs-agent/pull/1584)
+* Feature - Introduce two environment variables `ECS_CONTAINER_INSTANCE_PROPAGATE_TAGS_FROM` and `ECS_CONTAINER_INSTANCE_TAGS` to support ECS tagging [#1618](https://github.com/aws/amazon-ecs-agent/pull/1618)
## 1.21.0
* Feature - Add v3 task metadata support for awsvpc, host and bridge network mode
diff --git a/Makefile b/Makefile
index 8d58fb2f602..7f374645d98 100644
--- a/Makefile
+++ b/Makefile
@@ -15,6 +15,7 @@ USERID=$(shell id -u)
GO_EXECUTABLE=$(shell command -v go 2> /dev/null)
.PHONY: all gobuild static docker release certs test clean netkitten test-registry namespace-tests run-functional-tests benchmark-test gogenerate run-integ-tests pause-container get-cni-sources cni-plugins test-artifacts
+BUILD_PLATFORM:=$(shell uname -m)
all: docker
@@ -88,8 +89,13 @@ misc/certs/ca-certificates.crt:
docker build -t "amazon/amazon-ecs-agent-cert-source:make" misc/certs/
docker run "amazon/amazon-ecs-agent-cert-source:make" cat /etc/ssl/certs/ca-certificates.crt > misc/certs/ca-certificates.crt
-test:
+ifeq (${BUILD_PLATFORM},aarch64)
+test::
+ . ./scripts/shared_env && go test -tags unit -timeout=25s -v -cover $(shell go list ./agent/... | grep -v /vendor/)
+else
+test::
. ./scripts/shared_env && go test -race -tags unit -timeout=25s -v -cover $(shell go list ./agent/... | grep -v /vendor/)
+endif
test-silent:
. ./scripts/shared_env && go test -timeout=25s -cover $(shell go list ./agent/... | grep -v /vendor/)
@@ -123,7 +129,13 @@ endef
# TODO: use `go list -f` to target the test files more directly
ALL_GO_FILES = $(shell find . -name "*.go" -print | tr "\n" " ")
+
+ifeq (${BUILD_PLATFORM},aarch64)
+GO_INTEG_TEST = go test -tags integration -c -o
+else
GO_INTEG_TEST = go test -race -tags integration -c -o
+endif
+
out/test-artifacts/linux-engine-tests: $(ALL_GO_FILES) .out-stamp .builder-image-stamp
$(call dockerbuild,$(GO_INTEG_TEST) $@ ./agent/engine)
@@ -234,11 +246,21 @@ cni-plugins: get-cni-sources .out-stamp
"amazon/amazon-ecs-build-cniplugins:make"
@echo "Built amazon-ecs-cni-plugins successfully."
+ifeq (${BUILD_PLATFORM},aarch64)
run-integ-tests: test-registry gremlin container-health-check-image run-sudo-tests
- . ./scripts/shared_env && go test -race -tags integration -timeout=10m -v ./agent/engine/... ./agent/stats/... ./agent/app/...
-
-run-sudo-tests:
+	. ./scripts/shared_env && go test -tags integration -timeout=20m -v ./agent/engine/... ./agent/stats/... ./agent/app/...
+else
+run-integ-tests: test-registry gremlin container-health-check-image run-sudo-tests
+ . ./scripts/shared_env && go test -race -tags integration -timeout=12m -v ./agent/engine/... ./agent/stats/... ./agent/app/...
+endif
+
+ifeq (${BUILD_PLATFORM},aarch64)
+run-sudo-tests::
+ . ./scripts/shared_env && sudo -E ${GO_EXECUTABLE} test -tags sudo -timeout=10m -v ./agent/engine/...
+else
+run-sudo-tests::
. ./scripts/shared_env && sudo -E ${GO_EXECUTABLE} test -race -tags sudo -timeout=1m -v ./agent/engine/...
+endif
.PHONY: codebuild
codebuild: test-artifacts .out-stamp
diff --git a/README.md b/README.md
index 7e6d7e59e0a..a3fe6f72225 100644
--- a/README.md
+++ b/README.md
@@ -132,6 +132,8 @@ additional details on each available environment variable.
| `ECS_ENABLE_CPU_UNBOUNDED_WINDOWS_WORKAROUND` | `true` | When `true`, ECS will allow CPU unbounded(CPU=`0`) tasks to run along with CPU bounded tasks in Windows. | Not applicable | `false` |
| `ECS_TASK_METADATA_RPS_LIMIT` | `100,150` | Comma separated integer values for steady state and burst throttle limits for task metadata endpoint | `40,60` | `40,60` |
| `ECS_SHARED_VOLUME_MATCH_FULL_CONFIG` | `true` | When `true`, ECS Agent will compare name, driver options, and labels to make sure volumes are identical. When `false`, Agent will short circuit shared volume comparison if the names match. This is the default Docker behavior. If a volume is shared across instances, this should be set to `false`. | `false` | `false`|
+| `ECS_CONTAINER_INSTANCE_PROPAGATE_TAGS_FROM` | `ec2_instance` | If `ec2_instance` is specified, existing tags defined on the container instance will be registered to Amazon ECS and will be discoverable using the `ListTagsForResource` API. Using this requires that the IAM role associated with the container instance have the `ec2:DescribeTags` action allowed. | `none` | `none` |
+| `ECS_CONTAINER_INSTANCE_TAGS` | `{"tag_key": "tag_val"}` | The metadata that you apply to the container instance to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters. If tags also exist on your container instance that are propagated using the `ECS_CONTAINER_INSTANCE_PROPAGATE_TAGS_FROM` parameter, those tags will be overwritten by the tags specified using `ECS_CONTAINER_INSTANCE_TAGS`. | `{}` | `{}` |
### Persistence
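
As a rough sketch of how the `ECS_CONTAINER_INSTANCE_TAGS` JSON form documented above could be decoded into ECS tags (the helper name and error handling are illustrative, not the agent's actual config code):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// parseContainerInstanceTags is a hypothetical helper that decodes the
// {"tag_key": "tag_val"} form of ECS_CONTAINER_INSTANCE_TAGS into []*ecs.Tag.
func parseContainerInstanceTags(raw string) ([]*ecs.Tag, error) {
	kv := make(map[string]string)
	if err := json.Unmarshal([]byte(raw), &kv); err != nil {
		return nil, fmt.Errorf("invalid ECS_CONTAINER_INSTANCE_TAGS: %v", err)
	}
	tags := make([]*ecs.Tag, 0, len(kv))
	for k, v := range kv {
		tags = append(tags, &ecs.Tag{Key: aws.String(k), Value: aws.String(v)})
	}
	return tags, nil
}

func main() {
	tags, err := parseContainerInstanceTags(`{"team": "infra", "stage": "prod"}`)
	fmt.Println(tags, err)
}
```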
diff --git a/THIRD-PARTY b/THIRD-PARTY
index aec4680d41e..c901c7507f3 100644
--- a/THIRD-PARTY
+++ b/THIRD-PARTY
@@ -3,6 +3,7 @@
** github.com/containerd/continuity; version 1bed1ecb1dc42d8f4d2ac8c23e5cac64749e82c9 -- https://github.com/containerd/continuity
** github.com/containernetworking/cni; version v0.5.2 -- https://github.com/containernetworking/cni
** github.com/coreos/go-systemd; version 24036eb3df68550d24a2736c5d013f4e83366866 -- https://github.com/coreos/go-systemd
+** github.com/docker/distribution; version 749f6afb4572201e3c37325d0ffedb6f32be8950 -- https://www.docker.com
** github.com/docker/docker; version e4d0fe84f9ea88b0e0cfd847412c9f29442cc62d -- https://github.com/moby/moby
** github.com/docker/go-connections; version v0.3.0 -- https://github.com/docker/go-connections
** github.com/docker/go-units; version v0.3.2 -- https://github.com/docker/go-units
@@ -13,6 +14,7 @@
** github.com/opencontainers/image-spec; version v1.0.1 -- https://github.com/opencontainers/image-spec
** github.com/opencontainers/runc; version v0.1.1 -- https://github.com/opencontainers/runc
** github.com/opencontainers/runtime-spec; version d349388c43b01b2ea695965ae561b5bddb81318f -- https://github.com/opencontainers/runtime-spec
+** github.com/prometheus/client_golang; version 0.9.0 -- https://github.com/prometheus/client_golang
** github.com/vishvananda/netlink; version fe3b5664d23a11b52ba59bece4ff29c52772a56b -- https://github.com/vishvananda/netlink
** github.com/vishvananda/netns; version be1fbeda19366dea804f00efff2dd73a1642fdcc -- https://github.com/vishvananda/netns
@@ -213,6 +215,8 @@ Copyright (c) 2016-2017 the containerd authors
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
* For github.com/coreos/go-systemd see also this required NOTICE:
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+* For github.com/docker/distribution see also this required NOTICE:
+Copyright 2013-2017 Docker, Inc.
* For github.com/docker/docker see also this required NOTICE:
Copyright 2013-2017 Docker, Inc.
* For github.com/docker/go-connections see also this required NOTICE:
@@ -234,6 +238,32 @@ Copyright 2016 The Linux Foundation.
Copyright 2014 Docker, Inc.
* For github.com/opencontainers/runtime-spec see also this required NOTICE:
Copyright 2015 The Linux Foundation.
+* For github.com/prometheus/client_golang see also this required NOTICE:
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license
+details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
+
+Support for streaming Protocol Buffer messages for the Go language
+(golang).
+https://github.com/matttproud/golang_protobuf_extensions
+Copyright 2013 Matt T. Proud
+Licensed under the Apache License, Version 2.0
* For github.com/vishvananda/netlink see also this required NOTICE:
Copyright 2014 Vishvananda Ishaya.
Copyright 2014 Docker, Inc.
diff --git a/VERSION b/VERSION
index 3500250a4b0..57807d6d0d0 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.21.0
+1.22.0
diff --git a/agent/Gopkg.lock b/agent/Gopkg.lock
index f99bbeb6e7e..20ef2af40f3 100644
--- a/agent/Gopkg.lock
+++ b/agent/Gopkg.lock
@@ -9,14 +9,6 @@
revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f"
version = "v0.4.7"
-[[projects]]
- branch = "master"
- digest = "1:3721a10686511b80c052323423f0de17a8c06d417dbdd3b392b1578432a33aae"
- name = "github.com/Nvveen/Gotty"
- packages = ["."]
- pruneopts = "UT"
- revision = "cd527374f1e5bff4938207604a14f2e38a9cf512"
-
[[projects]]
digest = "1:1a734d8f35c6917ddd7b4900c71d42aba41804972c3e464dee419117eaefd86c"
name = "github.com/aws/aws-sdk-go"
@@ -67,6 +59,14 @@
revision = "870fa4307c5f57c14153c695df7b87af1acdcbf1"
version = "v1.15.66"
+[[projects]]
+ branch = "master"
+ digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
+ name = "github.com/beorn7/perks"
+ packages = ["quantile"]
+ pruneopts = "UT"
+ revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
+
[[projects]]
digest = "1:50e893a85575fa48dc4982a279e50e2fd8b74e4f7c587860c1e25c77083b8125"
name = "github.com/cihub/seelog"
@@ -205,14 +205,6 @@
revision = "0dadbb0345b35ec7ef35e228dabb8de89a65bf52"
version = "v0.3.2"
-[[projects]]
- digest = "1:01245be1912f74ab0527574ccd2dbea00cc7b2e304062c49b448b78a8991d802"
- name = "github.com/go-ini/ini"
- packages = ["."]
- pruneopts = "UT"
- revision = "7e7da451323b6766da368f8a1e8ec9a88a16b4a0"
- version = "v1.31.1"
-
[[projects]]
digest = "1:57fa4c058c21ce25d0b7272518dd746065117abf6cc706158b0d361202024520"
name = "github.com/godbus/dbus"
@@ -240,6 +232,14 @@
revision = "c34cdb4725f4c3844d095133c6e40e448b86589b"
version = "v1.1.1"
+[[projects]]
+ digest = "1:97df918963298c287643883209a2c3f642e6593379f97ab400c2a2e219ab647d"
+ name = "github.com/golang/protobuf"
+ packages = ["proto"]
+ pruneopts = "UT"
+ revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
+ version = "v1.2.0"
+
[[projects]]
digest = "1:c79fb010be38a59d657c48c6ba1d003a8aa651fa56b579d959d74573b7dff8e1"
name = "github.com/gorilla/context"
@@ -279,6 +279,14 @@
revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242"
version = "v1.0.1"
+[[projects]]
+ digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
+ name = "github.com/matttproud/golang_protobuf_extensions"
+ packages = ["pbutil"]
+ pruneopts = "UT"
+ revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
+ version = "v1.0.1"
+
[[projects]]
digest = "1:ee4d4af67d93cc7644157882329023ce9a7bcfce956a079069a9405521c7cc8d"
name = "github.com/opencontainers/go-digest"
@@ -335,6 +343,51 @@
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
+[[projects]]
+ digest = "1:20a8a18488bdef471795af0413d9a3c9c35dc24ca93342329b884ba3c15cbaab"
+ name = "github.com/prometheus/client_golang"
+ packages = [
+ "prometheus",
+ "prometheus/internal",
+ "prometheus/promhttp",
+ ]
+ pruneopts = "UT"
+ revision = "1cafe34db7fdec6022e17e00e1c1ea501022f3e4"
+ version = "v0.9.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
+ name = "github.com/prometheus/client_model"
+ packages = ["go"]
+ pruneopts = "UT"
+ revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
+
+[[projects]]
+ branch = "master"
+ digest = "1:db712fde5d12d6cdbdf14b777f0c230f4ff5ab0be8e35b239fc319953ed577a4"
+ name = "github.com/prometheus/common"
+ packages = [
+ "expfmt",
+ "internal/bitbucket.org/ww/goautoneg",
+ "model",
+ ]
+ pruneopts = "UT"
+ revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6"
+
+[[projects]]
+ branch = "master"
+ digest = "1:ef74914912f99c79434d9c09658274678bc85080ebe3ab32bec3940ebce5e1fc"
+ name = "github.com/prometheus/procfs"
+ packages = [
+ ".",
+ "internal/util",
+ "nfs",
+ "xfs",
+ ]
+ pruneopts = "UT"
+ revision = "185b4288413d2a0dd0806f78c90dde719829e5ae"
+
[[projects]]
digest = "1:3f53e9e4dfbb664cd62940c9c4b65a2171c66acd0b7621a1a6b8e78513525a52"
name = "github.com/sirupsen/logrus"
@@ -472,11 +525,14 @@
"github.com/docker/go-units",
"github.com/golang/mock/gomock",
"github.com/golang/mock/mockgen/model",
+ "github.com/golang/protobuf/proto",
"github.com/gorilla/mux",
"github.com/gorilla/websocket",
"github.com/opencontainers/runtime-spec/specs-go",
"github.com/pborman/uuid",
"github.com/pkg/errors",
+ "github.com/prometheus/client_golang/prometheus",
+ "github.com/prometheus/client_golang/prometheus/promhttp",
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/require",
"github.com/stretchr/testify/suite",
diff --git a/agent/Gopkg.toml b/agent/Gopkg.toml
index 537bec1da96..1c43ada25d0 100644
--- a/agent/Gopkg.toml
+++ b/agent/Gopkg.toml
@@ -88,3 +88,7 @@ required = ["github.com/golang/mock/mockgen/model"]
[[constraint]]
name = "github.com/gorilla/mux"
version = "1.6.2"
+
+[[constraint]]
+ name = "github.com/prometheus/client_golang"
+ version = "0.9.0"
\ No newline at end of file
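
The new `prometheus/client_golang` constraint backs the metrics engine wired up in `agent/app/agent.go` below. As a minimal illustration of what the dependency provides (the metric name and port here are made up, not the agent's real metrics):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// exampleCalls is an illustrative counter; the agent's actual metric
// names and labels live in its metrics package.
var exampleCalls = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "example_api_calls_total",
	Help: "Total number of example API calls.",
})

func main() {
	prometheus.MustRegister(exampleCalls)
	exampleCalls.Inc()
	http.Handle("/metrics", promhttp.Handler()) // serves the Prometheus text format
	http.ListenAndServe(":8080", nil)
}
```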
diff --git a/agent/acs/model/api/api-2.json b/agent/acs/model/api/api-2.json
index ce0fa8f37d3..28987eda3f5 100644
--- a/agent/acs/model/api/api-2.json
+++ b/agent/acs/model/api/api-2.json
@@ -437,7 +437,10 @@
},
"SecretProvider":{
"type":"string",
- "enum":["ssm"]
+ "enum":[
+ "ssm",
+ "asm"
+ ]
},
"SecretType":{
"type":"string",
diff --git a/agent/api/container/container.go b/agent/api/container/container.go
index 3d0080dabbb..d9888ff7776 100644
--- a/agent/api/container/container.go
+++ b/agent/api/container/container.go
@@ -57,6 +57,12 @@ const (
// SecretProviderSSM is to show secret provider being SSM
SecretProviderSSM = "ssm"
+
+ // SecretProviderASM is to show secret provider being ASM
+ SecretProviderASM = "asm"
+
+ // SecretTypeEnv is to show secret type being ENVIRONMENT_VARIABLE
+ SecretTypeEnv = "ENVIRONMENT_VARIABLE"
)
// DockerConfig represents additional metadata about a container to run. It's
@@ -257,9 +263,9 @@ type Secret struct {
Provider string `json:"provider"`
}
-// GetSSMSecretResourceCacheKey returns the key required to access the secret
+// GetSecretResourceCacheKey returns the key required to access the secret
// from the ssmsecret resource
-func (s *Secret) GetSSMSecretResourceCacheKey() string {
+func (s *Secret) GetSecretResourceCacheKey() string {
return s.ValueFrom + "_" + s.Region
}
@@ -752,7 +758,7 @@ func (c *Container) ShouldCreateWithSSMSecret() bool {
c.lock.RLock()
defer c.lock.RUnlock()
- //Secrets field will be nil if there is no secrets for container
+	// Secrets field will be nil if there are no secrets for the container
if c.Secrets == nil {
return false
}
@@ -765,6 +771,25 @@ func (c *Container) ShouldCreateWithSSMSecret() bool {
return false
}
+// ShouldCreateWithASMSecret returns true if this container needs to get secret
+// value from AWS Secrets Manager
+func (c *Container) ShouldCreateWithASMSecret() bool {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+
+	// Secrets field will be nil if there are no secrets for the container
+ if c.Secrets == nil {
+ return false
+ }
+
+ for _, secret := range c.Secrets {
+ if secret.Provider == SecretProviderASM {
+ return true
+ }
+ }
+ return false
+}
+
// MergeEnvironmentVariables appends additional envVarName:envVarValue pairs to
// the the container's enviornment values structure
func (c *Container) MergeEnvironmentVariables(envVars map[string]string) {
@@ -775,8 +800,23 @@ func (c *Container) MergeEnvironmentVariables(envVars map[string]string) {
if c.Environment == nil {
c.Environment = make(map[string]string)
}
-
for k, v := range envVars {
c.Environment[k] = v
}
}
+
+// HasSecretAsEnv returns true if the container has a secret whose type is
+// ENVIRONMENT_VARIABLE
+func (c *Container) HasSecretAsEnv() bool {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+
+	// Secrets field will be nil if there are no secrets for the container
+ if c.Secrets == nil {
+ return false
+ }
+ for _, secret := range c.Secrets {
+ if secret.Type == SecretTypeEnv {
+ return true
+ }
+ }
+ return false
+}
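
A quick usage sketch of the two new predicates on a hand-built container (the field values are illustrative):

```go
package main

import (
	"fmt"

	apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
)

func main() {
	c := &apicontainer.Container{
		Name:  "web",
		Image: "image:tag",
		Secrets: []apicontainer.Secret{{
			Provider:  apicontainer.SecretProviderASM,
			Type:      apicontainer.SecretTypeEnv,
			Name:      "DB_PASSWORD",
			ValueFrom: "arn:aws:secretsmanager:us-west-2:11111:secret:/test/secretName",
			Region:    "us-west-2",
		}},
	}
	fmt.Println(c.ShouldCreateWithASMSecret()) // true: at least one secret uses the asm provider
	fmt.Println(c.HasSecretAsEnv())            // true: at least one secret is an ENVIRONMENT_VARIABLE
}
```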
diff --git a/agent/api/container/container_test.go b/agent/api/container/container_test.go
index bfa5145bd41..921e09ebe19 100644
--- a/agent/api/container/container_test.go
+++ b/agent/api/container/container_test.go
@@ -376,3 +376,82 @@ func TestMergeEnvironmentVariables(t *testing.T) {
})
}
}
+
+func TestShouldCreateWithASMSecret(t *testing.T) {
+ cases := []struct {
+ in Container
+ out bool
+ }{
+ {Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: []Secret{
+ Secret{
+ Provider: "asm",
+ Name: "secret",
+ ValueFrom: "/test/secretName",
+ }},
+ }, true},
+ {Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: nil,
+ }, false},
+ {Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: []Secret{
+ Secret{
+ Provider: "ssm",
+ Name: "secret",
+ ValueFrom: "/test/secretName",
+ }},
+ }, false},
+ }
+
+ for _, test := range cases {
+ container := test.in
+ assert.Equal(t, test.out, container.ShouldCreateWithASMSecret())
+ }
+}
+
+func TestHasSecretAsEnv(t *testing.T) {
+ cases := []struct {
+ in Container
+ out bool
+ }{
+ {Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: []Secret{
+ Secret{
+ Provider: "asm",
+ Name: "secret",
+ Type: "ENVIRONMENT_VARIABLE",
+ ValueFrom: "/test/secretName",
+ }},
+ }, true},
+ {Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: nil,
+ }, false},
+ {Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: []Secret{
+ Secret{
+ Provider: "asm",
+ Name: "secret",
+ Type: "MOUNT_POINT",
+ ValueFrom: "/test/secretName",
+ }},
+ }, false},
+ }
+
+ for _, test := range cases {
+ container := test.in
+ assert.Equal(t, test.out, container.HasSecretAsEnv())
+ }
+
+}
diff --git a/agent/api/ecsclient/client.go b/agent/api/ecsclient/client.go
index 567f6198c68..e7d247c07cb 100644
--- a/agent/api/ecsclient/client.go
+++ b/agent/api/ecsclient/client.go
@@ -41,6 +41,7 @@ const (
pollEndpointCacheSize = 1
pollEndpointCacheTTL = 20 * time.Minute
roundtripTimeout = 5 * time.Second
+ azAttrName = "ecs.availability-zone"
)
// APIECSClient implements ECSClient
@@ -108,7 +109,7 @@ func (client *APIECSClient) CreateCluster(clusterName string) (string, error) {
// instance ARN allows a container instance to update its registered
// resources.
func (client *APIECSClient) RegisterContainerInstance(containerInstanceArn string,
- attributes []*ecs.Attribute, tags []*ecs.Tag) (string, error) {
+ attributes []*ecs.Attribute, tags []*ecs.Tag, registrationToken string) (string, string, error) {
clusterRef := client.config.Cluster
// If our clusterRef is empty, we should try to create the default
if clusterRef == "" {
@@ -119,22 +120,22 @@ func (client *APIECSClient) RegisterContainerInstance(containerInstanceArn strin
}()
// Attempt to register without checking existence of the cluster so we don't require
// excess permissions in the case where the cluster already exists and is active
- containerInstanceArn, err := client.registerContainerInstance(clusterRef, containerInstanceArn, attributes, tags)
+ containerInstanceArn, availabilityzone, err := client.registerContainerInstance(clusterRef, containerInstanceArn, attributes, tags, registrationToken)
if err == nil {
- return containerInstanceArn, nil
+ return containerInstanceArn, availabilityzone, nil
}
// If trying to register fails, try to create the cluster before calling
// register again
clusterRef, err = client.CreateCluster(clusterRef)
if err != nil {
- return "", err
+ return "", "", err
}
}
- return client.registerContainerInstance(clusterRef, containerInstanceArn, attributes, tags)
+ return client.registerContainerInstance(clusterRef, containerInstanceArn, attributes, tags, registrationToken)
}
func (client *APIECSClient) registerContainerInstance(clusterRef string, containerInstanceArn string,
- attributes []*ecs.Attribute, tags []*ecs.Tag) (string, error) {
+ attributes []*ecs.Attribute, tags []*ecs.Tag, registrationToken string) (string, string, error) {
registerRequest := ecs.RegisterContainerInstanceInput{Cluster: &clusterRef}
var registrationAttributes []*ecs.Attribute
if containerInstanceArn != "" {
@@ -164,18 +165,31 @@ func (client *APIECSClient) registerContainerInstance(clusterRef string, contain
resources, err := client.getResources()
if err != nil {
- return "", err
+ return "", "", err
}
registerRequest.TotalResources = resources
+
+	registerRequest.ClientToken = &registrationToken
	resp, err := client.standardClient.RegisterContainerInstance(&registerRequest)
if err != nil {
seelog.Errorf("Unable to register as a container instance with ECS: %v", err)
- return "", err
+ return "", "", err
}
+
+ var availabilityzone = ""
+ if resp != nil {
+ for _, attr := range resp.ContainerInstance.Attributes {
+ if aws.StringValue(attr.Name) == azAttrName {
+ availabilityzone = aws.StringValue(attr.Value)
+ break
+ }
+ }
+ }
+
seelog.Info("Registered container instance with cluster!")
err = validateRegisteredAttributes(registerRequest.Attributes, resp.ContainerInstance.Attributes)
- return aws.StringValue(resp.ContainerInstance.ContainerInstanceArn), err
+ return aws.StringValue(resp.ContainerInstance.ContainerInstanceArn), availabilityzone, err
}
func (client *APIECSClient) setInstanceIdentity(registerRequest ecs.RegisterContainerInstanceInput) ecs.RegisterContainerInstanceInput {
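
The new `registrationToken` parameter threads a client token into the `RegisterContainerInstance` call so that retries are idempotent, meaning a back-to-back registration cannot yield two container instance ARNs (the bug in #1711). A hedged sketch of the calling convention, using the same `uuid` package the agent imports:

```go
package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	// One token per registration attempt: the backend can deduplicate
	// retries carrying the same token.
	token := uuid.New()
	fmt.Println("client token:", token)

	// Illustrative call shape only (client, capabilities, tags not shown):
	// arn, az, err := client.RegisterContainerInstance("", capabilities, tags, token)
}
```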
diff --git a/agent/api/ecsclient/client_test.go b/agent/api/ecsclient/client_test.go
index f48519b5120..c01a3bc85f4 100644
--- a/agent/api/ecsclient/client_test.go
+++ b/agent/api/ecsclient/client_test.go
@@ -47,6 +47,7 @@ const (
configuredCluster = "mycluster"
iid = "instanceIdentityDocument"
iidSignature = "signature"
+ registrationToken = "clientToken"
)
var (
@@ -329,7 +330,8 @@ func TestReRegisterContainerInstance(t *testing.T) {
fakeCapabilities := []string{"capability1", "capability2"}
expectedAttributes := map[string]string{
- "ecs.os-type": config.OSType,
+ "ecs.os-type": config.OSType,
+ "ecs.availability-zone": "us-west-2b",
}
for i := range fakeCapabilities {
expectedAttributes[fakeCapabilities[i]] = ""
@@ -342,6 +344,7 @@ func TestReRegisterContainerInstance(t *testing.T) {
mc.EXPECT().RegisterContainerInstance(gomock.Any()).Do(func(req *ecs.RegisterContainerInstanceInput) {
assert.Equal(t, "arn:test", *req.ContainerInstanceArn, "Wrong container instance ARN")
assert.Equal(t, configuredCluster, *req.Cluster, "Wrong cluster")
+ assert.Equal(t, registrationToken, *req.ClientToken, "Wrong client token")
assert.Equal(t, iid, *req.InstanceIdentityDocument, "Wrong IID")
assert.Equal(t, iidSignature, *req.InstanceIdentityDocumentSignature, "Wrong IID sig")
assert.Equal(t, 4, len(req.TotalResources), "Wrong length of TotalResources")
@@ -375,13 +378,11 @@ func TestReRegisterContainerInstance(t *testing.T) {
nil),
)
- arn, err := client.RegisterContainerInstance("arn:test", capabilities, containerInstanceTags)
- if err != nil {
- t.Errorf("Should not be an error: %v", err)
- }
- if arn != "registerArn" {
- t.Errorf("Wrong arn: %v", arn)
- }
+ arn, availabilityzone, err := client.RegisterContainerInstance("arn:test", capabilities, containerInstanceTags, registrationToken)
+
+ assert.NoError(t, err)
+ assert.Equal(t, "registerArn", arn)
+ assert.Equal(t, "us-west-2b", availabilityzone, "availabilityZone is incorrect")
}
func TestRegisterContainerInstance(t *testing.T) {
@@ -398,6 +399,7 @@ func TestRegisterContainerInstance(t *testing.T) {
"ecs.os-type": config.OSType,
"my_custom_attribute": "Custom_Value1",
"my_other_custom_attribute": "Custom_Value2",
+ "ecs.availability-zone": "us-west-2b",
}
capabilities := buildAttributeList(fakeCapabilities, nil)
@@ -407,6 +409,7 @@ func TestRegisterContainerInstance(t *testing.T) {
mc.EXPECT().RegisterContainerInstance(gomock.Any()).Do(func(req *ecs.RegisterContainerInstanceInput) {
assert.Nil(t, req.ContainerInstanceArn)
assert.Equal(t, configuredCluster, *req.Cluster, "Wrong cluster")
+ assert.Equal(t, registrationToken, *req.ClientToken, "Wrong client token")
assert.Equal(t, iid, *req.InstanceIdentityDocument, "Wrong IID")
assert.Equal(t, iidSignature, *req.InstanceIdentityDocumentSignature, "Wrong IID sig")
assert.Equal(t, 4, len(req.TotalResources), "Wrong length of TotalResources")
@@ -435,9 +438,10 @@ func TestRegisterContainerInstance(t *testing.T) {
nil),
)
- arn, err := client.RegisterContainerInstance("", capabilities, containerInstanceTags)
+ arn, availabilityzone, err := client.RegisterContainerInstance("", capabilities, containerInstanceTags, registrationToken)
assert.NoError(t, err)
assert.Equal(t, "registerArn", arn)
+ assert.Equal(t, "us-west-2b", availabilityzone)
}
func TestRegisterContainerInstanceNoIID(t *testing.T) {
@@ -460,6 +464,7 @@ func TestRegisterContainerInstanceNoIID(t *testing.T) {
"ecs.os-type": config.OSType,
"my_custom_attribute": "Custom_Value1",
"my_other_custom_attribute": "Custom_Value2",
+ "ecs.availability-zone": "us-west-2b",
}
capabilities := buildAttributeList(fakeCapabilities, nil)
@@ -467,6 +472,7 @@ func TestRegisterContainerInstanceNoIID(t *testing.T) {
mc.EXPECT().RegisterContainerInstance(gomock.Any()).Do(func(req *ecs.RegisterContainerInstanceInput) {
assert.Nil(t, req.ContainerInstanceArn)
assert.Equal(t, configuredCluster, *req.Cluster, "Wrong cluster")
+ assert.Equal(t, registrationToken, *req.ClientToken, "Wrong client token")
assert.Equal(t, "", *req.InstanceIdentityDocument, "Wrong IID")
assert.Equal(t, "", *req.InstanceIdentityDocumentSignature, "Wrong IID sig")
assert.Equal(t, 4, len(req.TotalResources), "Wrong length of TotalResources")
@@ -495,9 +501,10 @@ func TestRegisterContainerInstanceNoIID(t *testing.T) {
nil),
)
- arn, err := client.RegisterContainerInstance("", capabilities, containerInstanceTags)
+ arn, availabilityzone, err := client.RegisterContainerInstance("", capabilities, containerInstanceTags, registrationToken)
assert.NoError(t, err)
assert.Equal(t, "registerArn", arn)
+ assert.Equal(t, "us-west-2b", availabilityzone)
}
// TestRegisterContainerInstanceWithNegativeResource tests that registration should fail with a negative resource
@@ -521,7 +528,7 @@ func TestRegisterContainerInstanceWithNegativeResource(t *testing.T) {
mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentResource).Return("instanceIdentityDocument", nil),
mockEC2Metadata.EXPECT().GetDynamicData(ec2.InstanceIdentityDocumentSignatureResource).Return("signature", nil),
)
- _, err := client.RegisterContainerInstance("", nil, nil)
+ _, _, err := client.RegisterContainerInstance("", nil, nil, "")
assert.Error(t, err, "Register resource with negative value should cause registration fail")
}
@@ -551,7 +558,7 @@ func TestRegisterContainerInstanceWithEmptyTags(t *testing.T) {
nil),
)
- _, err := client.RegisterContainerInstance("", nil, make([]*ecs.Tag, 0))
+ _, _, err := client.RegisterContainerInstance("", nil, make([]*ecs.Tag, 0), "")
assert.NoError(t, err)
}
@@ -625,13 +632,16 @@ func TestRegisterBlankCluster(t *testing.T) {
nil),
)
- arn, err := client.RegisterContainerInstance("", nil, nil)
+ arn, availabilityzone, err := client.RegisterContainerInstance("", nil, nil, "")
if err != nil {
t.Errorf("Should not be an error: %v", err)
}
if arn != "registerArn" {
t.Errorf("Wrong arn: %v", arn)
}
+ if availabilityzone != "" {
+ t.Errorf("wrong availability zone: %v", availabilityzone)
+ }
}
func TestDiscoverTelemetryEndpoint(t *testing.T) {
diff --git a/agent/api/interface.go b/agent/api/interface.go
index 53babc29ee1..ef25f7f6e5a 100644
--- a/agent/api/interface.go
+++ b/agent/api/interface.go
@@ -27,7 +27,7 @@ type ECSClient interface {
// instance ARN allows a container instance to update its registered
// resources.
RegisterContainerInstance(existingContainerInstanceArn string,
- attributes []*ecs.Attribute, tags []*ecs.Tag) (string, error)
+ attributes []*ecs.Attribute, tags []*ecs.Tag, registrationToken string) (string, string, error)
// SubmitTaskStateChange sends a state change and returns an error
// indicating if it was submitted
SubmitTaskStateChange(change TaskStateChange) error
diff --git a/agent/api/mocks/api_mocks.go b/agent/api/mocks/api_mocks.go
index 0987bc179fd..e1603c05ffb 100644
--- a/agent/api/mocks/api_mocks.go
+++ b/agent/api/mocks/api_mocks.go
@@ -186,16 +186,17 @@ func (mr *MockECSClientMockRecorder) DiscoverTelemetryEndpoint(arg0 interface{})
}
// RegisterContainerInstance mocks base method
-func (m *MockECSClient) RegisterContainerInstance(arg0 string, arg1 []*ecs.Attribute, arg2 []*ecs.Tag) (string, error) {
- ret := m.ctrl.Call(m, "RegisterContainerInstance", arg0, arg1, arg2)
+func (m *MockECSClient) RegisterContainerInstance(arg0 string, arg1 []*ecs.Attribute, arg2 []*ecs.Tag, arg3 string) (string, string, error) {
+ ret := m.ctrl.Call(m, "RegisterContainerInstance", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(string)
- ret1, _ := ret[1].(error)
- return ret0, ret1
+ ret1, _ := ret[1].(string)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
}
// RegisterContainerInstance indicates an expected call of RegisterContainerInstance
-func (mr *MockECSClientMockRecorder) RegisterContainerInstance(arg0, arg1, arg2 interface{}) *gomock.Call {
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterContainerInstance", reflect.TypeOf((*MockECSClient)(nil).RegisterContainerInstance), arg0, arg1, arg2)
+func (mr *MockECSClientMockRecorder) RegisterContainerInstance(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterContainerInstance", reflect.TypeOf((*MockECSClient)(nil).RegisterContainerInstance), arg0, arg1, arg2, arg3)
}
// SubmitContainerStateChange mocks base method
diff --git a/agent/api/task/task.go b/agent/api/task/task.go
index 9b5012b9f5a..00b1f71dc2a 100644
--- a/agent/api/task/task.go
+++ b/agent/api/task/task.go
@@ -37,6 +37,7 @@ import (
"github.com/aws/amazon-ecs-agent/agent/ecscni"
"github.com/aws/amazon-ecs-agent/agent/taskresource"
"github.com/aws/amazon-ecs-agent/agent/taskresource/asmauth"
+ "github.com/aws/amazon-ecs-agent/agent/taskresource/asmsecret"
"github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret"
resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status"
resourcetype "github.com/aws/amazon-ecs-agent/agent/taskresource/types"
@@ -249,6 +250,10 @@ func (task *Task) PostUnmarshalTask(cfg *config.Config,
task.initializeSSMSecretResource(credentialsManager, resourceFields)
}
+ if task.requiresASMSecret() {
+ task.initializeASMSecretResource(credentialsManager, resourceFields)
+ }
+
err := task.initializeDockerLocalVolumes(dockerClient, ctx)
if err != nil {
return apierrors.NewResourceInitError(task.Arn, err)
@@ -574,6 +579,51 @@ func (task *Task) getAllSSMSecretRequirements() map[string][]apicontainer.Secret
return reqs
}
+// requiresASMSecret returns true if at least one container in the task
+// needs to retrieve secret from AWS Secrets Manager
+func (task *Task) requiresASMSecret() bool {
+ for _, container := range task.Containers {
+ if container.ShouldCreateWithASMSecret() {
+ return true
+ }
+ }
+ return false
+}
+
+// initializeASMSecretResource builds the resource dependency map for the asmsecret resource
+func (task *Task) initializeASMSecretResource(credentialsManager credentials.Manager,
+ resourceFields *taskresource.ResourceFields) {
+ asmSecretResource := asmsecret.NewASMSecretResource(task.Arn, task.getAllASMSecretRequirements(),
+ task.ExecutionCredentialsID, credentialsManager, resourceFields.ASMClientCreator)
+ task.AddResource(asmsecret.ResourceName, asmSecretResource)
+
+	// every container that needs ASM secrets vended as env vars must wait until all secrets have been retrieved
+ for _, container := range task.Containers {
+ if container.ShouldCreateWithASMSecret() {
+ container.BuildResourceDependency(asmSecretResource.GetName(),
+ resourcestatus.ResourceStatus(asmsecret.ASMSecretCreated),
+ apicontainerstatus.ContainerCreated)
+ }
+ }
+}
+
+// getAllASMSecretRequirements collects the task's unique ASM secrets into a map keyed by the secret resource cache key
+func (task *Task) getAllASMSecretRequirements() map[string]apicontainer.Secret {
+ reqs := make(map[string]apicontainer.Secret)
+
+ for _, container := range task.Containers {
+ for _, secret := range container.Secrets {
+ if secret.Provider == apicontainer.SecretProviderASM {
+ secretKey := secret.GetSecretResourceCacheKey()
+ if _, ok := reqs[secretKey]; !ok {
+ reqs[secretKey] = secret
+ }
+ }
+ }
+ }
+ return reqs
+}
+
// BuildCNIConfig constructs the cni configuration from eni
func (task *Task) BuildCNIConfig() (*ecscni.Config, error) {
if !task.isNetworkModeVPC() {
@@ -1617,20 +1667,49 @@ func (task *Task) getASMAuthResource() ([]taskresource.TaskResource, bool) {
return res, ok
}
-// PopulateSSMSecrets appends the container's env var map with ssm parameters
-func (task *Task) PopulateSSMSecrets(container *apicontainer.Container) *apierrors.DockerClientConfigError {
- resource, ok := task.getSSMSecretsResource()
- if !ok {
- return &apierrors.DockerClientConfigError{"task secret data: unable to fetch SSM Secrets resource"}
+// getSSMSecretsResource retrieves ssmsecret resource from resource map
+func (task *Task) getSSMSecretsResource() ([]taskresource.TaskResource, bool) {
+ task.lock.RLock()
+ defer task.lock.RUnlock()
+
+ res, ok := task.ResourcesMapUnsafe[ssmsecret.ResourceName]
+ return res, ok
+}
+
+// PopulateSecretsAsEnv populates the container's env var map with secret values
+func (task *Task) PopulateSecretsAsEnv(container *apicontainer.Container) *apierrors.DockerClientConfigError {
+ var ssmRes *ssmsecret.SSMSecretResource
+ var asmRes *asmsecret.ASMSecretResource
+
+ if container.ShouldCreateWithSSMSecret() {
+ resource, ok := task.getSSMSecretsResource()
+ if !ok {
+ return &apierrors.DockerClientConfigError{"task secret data: unable to fetch SSM Secrets resource"}
+ }
+ ssmRes = resource[0].(*ssmsecret.SSMSecretResource)
+ }
+
+ if container.ShouldCreateWithASMSecret() {
+ resource, ok := task.getASMSecretsResource()
+ if !ok {
+ return &apierrors.DockerClientConfigError{"task secret data: unable to fetch ASM Secrets resource"}
+ }
+ asmRes = resource[0].(*asmsecret.ASMSecretResource)
}
- ssmResource := resource[0].(*ssmsecret.SSMSecretResource)
envVars := make(map[string]string)
for _, secret := range container.Secrets {
- if secret.Provider == apicontainer.SecretProviderSSM {
- k := secret.GetSSMSecretResourceCacheKey()
- if secretValue, ok := ssmResource.GetCachedSecretValue(k); ok {
+ if secret.Provider == apicontainer.SecretProviderSSM && secret.Type == apicontainer.SecretTypeEnv {
+ k := secret.GetSecretResourceCacheKey()
+ if secretValue, ok := ssmRes.GetCachedSecretValue(k); ok {
+ envVars[secret.Name] = secretValue
+ }
+ }
+
+ if secret.Provider == apicontainer.SecretProviderASM && secret.Type == apicontainer.SecretTypeEnv {
+ k := secret.GetSecretResourceCacheKey()
+ if secretValue, ok := asmRes.GetCachedSecretValue(k); ok {
envVars[secret.Name] = secretValue
}
}
@@ -1640,11 +1719,12 @@ func (task *Task) PopulateSSMSecrets(container *apicontainer.Container) *apierro
return nil
}
-func (task *Task) getSSMSecretsResource() ([]taskresource.TaskResource, bool) {
+// getASMSecretsResource retrieves asmsecret resource from resource map
+func (task *Task) getASMSecretsResource() ([]taskresource.TaskResource, bool) {
task.lock.RLock()
defer task.lock.RUnlock()
- res, ok := task.ResourcesMapUnsafe[ssmsecret.ResourceName]
+ res, ok := task.ResourcesMapUnsafe[asmsecret.ResourceName]
return res, ok
}
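
Since `GetSecretResourceCacheKey` is just `ValueFrom + "_" + Region`, two containers referencing the same secret in the same region collapse to a single entry in the requirements map. A toy illustration of that dedup (types and values made up):

```go
package main

import "fmt"

type secret struct{ ValueFrom, Region string }

// cacheKey mirrors GetSecretResourceCacheKey: ValueFrom + "_" + Region.
func (s secret) cacheKey() string { return s.ValueFrom + "_" + s.Region }

func main() {
	reqs := make(map[string]secret)
	for _, s := range []secret{
		{"/test/secretName1", "us-west-2"},
		{"/test/secretName1", "us-west-2"}, // same secret referenced by another container
		{"/test/secretName2", "us-west-2"},
	} {
		if _, ok := reqs[s.cacheKey()]; !ok {
			reqs[s.cacheKey()] = s
		}
	}
	fmt.Println(len(reqs)) // 2 unique secrets to fetch
}
```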
diff --git a/agent/api/task/task_linux_test.go b/agent/api/task/task_linux_test.go
index 35fff213e7b..3ec3b9a4801 100644
--- a/agent/api/task/task_linux_test.go
+++ b/agent/api/task/task_linux_test.go
@@ -64,7 +64,7 @@ func TestAddNetworkResourceProvisioningDependencyWithENI(t *testing.T) {
ENI: &apieni.ENI{},
Containers: []*apicontainer.Container{
{
- Name: "c1",
+ Name: "c1",
TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
},
},
@@ -358,7 +358,7 @@ func TestInitCgroupResourceSpecHappyPath(t *testing.T) {
Memory: taskMemoryLimit,
Containers: []*apicontainer.Container{
{
- Name: "c1",
+ Name: "c1",
TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
},
},
@@ -386,7 +386,7 @@ func TestInitCgroupResourceSpecInvalidARN(t *testing.T) {
Version: "1",
Containers: []*apicontainer.Container{
{
- Name: "c1",
+ Name: "c1",
TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
},
},
@@ -406,8 +406,8 @@ func TestInitCgroupResourceSpecInvalidMem(t *testing.T) {
Memory: taskMemoryLimit,
Containers: []*apicontainer.Container{
{
- Name: "C1",
- Memory: uint(2048), // container memory > task memory
+ Name: "C1",
+ Memory: uint(2048), // container memory > task memory
TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
},
},
@@ -426,7 +426,7 @@ func TestPostUnmarshalWithCPULimitsFail(t *testing.T) {
Version: "1",
Containers: []*apicontainer.Container{
{
- Name: "c1",
+ Name: "c1",
TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
},
},
diff --git a/agent/api/task/task_test.go b/agent/api/task/task_test.go
index 449052c8022..d215e240bfa 100644
--- a/agent/api/task/task_test.go
+++ b/agent/api/task/task_test.go
@@ -29,7 +29,7 @@ import (
apieni "github.com/aws/amazon-ecs-agent/agent/api/eni"
apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status"
"github.com/aws/amazon-ecs-agent/agent/asm"
- mock_factory "github.com/aws/amazon-ecs-agent/agent/asm/factory/mocks"
+ "github.com/aws/amazon-ecs-agent/agent/asm/factory/mocks"
"github.com/aws/amazon-ecs-agent/agent/asm/mocks"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/credentials"
@@ -46,6 +46,7 @@ import (
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
"github.com/aws/aws-sdk-go/service/secretsmanager"
+ "github.com/aws/amazon-ecs-agent/agent/taskresource/asmsecret"
"github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret"
"github.com/aws/aws-sdk-go/aws"
"github.com/docker/docker/api/types"
@@ -56,7 +57,11 @@ import (
"github.com/stretchr/testify/require"
)
-const dockerIDPrefix = "dockerid-"
+const (
+ dockerIDPrefix = "dockerid-"
+ secretKeyWest1 = "/test/secretName_us-west-2"
+ asmSecretKeyWest1 = "arn:aws:secretsmanager:us-west-2:11111:secret:/test/secretName_us-west-2"
+)
var defaultDockerClientAPIVersion = dockerclient.Version_1_17
@@ -1024,6 +1029,14 @@ func TestTaskFromACS(t *testing.T) {
HostConfig: strptr("hostconfig json"),
Version: strptr("version string"),
},
+ Secrets: []*ecsacs.Secret{
+ {
+ Name: strptr("secret"),
+ ValueFrom: strptr("/test/secret"),
+ Provider: strptr("ssm"),
+ Region: strptr("us-west-2"),
+ },
+ },
},
},
Volumes: []*ecsacs.Volume{
@@ -1090,6 +1103,14 @@ func TestTaskFromACS(t *testing.T) {
HostConfig: strptr("hostconfig json"),
Version: strptr("version string"),
},
+ Secrets: []apicontainer.Secret{
+ {
+ Name: "secret",
+ ValueFrom: "/test/secret",
+ Provider: "ssm",
+ Region: "us-west-2",
+ },
+ },
TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
},
},
@@ -2059,6 +2080,53 @@ func TestPostUnmarshalTaskSecret(t *testing.T) {
assert.NoError(t, err)
}
+func TestPostUnmarshalTaskASMSecret(t *testing.T) {
+ secret := apicontainer.Secret{
+ Provider: "asm",
+ Name: "secret",
+ Region: "us-west-2",
+ ValueFrom: "/test/secretName",
+ }
+
+ container := &apicontainer.Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: []apicontainer.Secret{secret},
+ TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
+ }
+
+ task := &Task{
+ Arn: "test",
+ ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource),
+ Containers: []*apicontainer.Container{container},
+ }
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ cfg := &config.Config{}
+ credentialsManager := mock_credentials.NewMockManager(ctrl)
+ asmClientCreator := mock_factory.NewMockClientCreator(ctrl)
+
+ resFields := &taskresource.ResourceFields{
+ ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{
+ ASMClientCreator: asmClientCreator,
+ CredentialsManager: credentialsManager,
+ },
+ }
+
+ resourceDep := apicontainer.ResourceDependency{
+ Name: asmsecret.ResourceName,
+ RequiredStatus: resourcestatus.ResourceStatus(asmsecret.ASMSecretCreated),
+ }
+
+ err := task.PostUnmarshalTask(cfg, credentialsManager, resFields, nil, nil)
+ assert.NoError(t, err)
+
+ assert.Equal(t, 1, len(task.ResourcesMapUnsafe))
+ assert.Equal(t, resourceDep, task.Containers[0].TransitionDependenciesMap[apicontainerstatus.ContainerCreated].ResourceDependencies[0])
+}
+
func TestGetAllSSMSecretRequirements(t *testing.T) {
regionWest := "us-west-2"
regionEast := "us-east-1"
@@ -2211,3 +2279,251 @@ func TestRequiresSSMSecretNoSecret(t *testing.T) {
assert.Equal(t, false, task.requiresSSMSecret())
}
+func TestRequiresASMSecret(t *testing.T) {
+ secret := apicontainer.Secret{
+ Provider: "asm",
+ Name: "secret",
+ Region: "us-west-2",
+ ValueFrom: "/test/secretName",
+ }
+
+ container := &apicontainer.Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: []apicontainer.Secret{secret},
+ TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
+ }
+
+ container1 := &apicontainer.Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: nil,
+ TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
+ }
+
+ task := &Task{
+ Arn: "test",
+ ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource),
+ Containers: []*apicontainer.Container{container, container1},
+ }
+
+ assert.True(t, task.requiresASMSecret())
+}
+
+func TestRequiresASMSecretNoSecret(t *testing.T) {
+ container := &apicontainer.Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: nil,
+ TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
+ }
+
+ container1 := &apicontainer.Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: nil,
+ TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
+ }
+
+ task := &Task{
+ Arn: "test",
+ ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource),
+ Containers: []*apicontainer.Container{container, container1},
+ }
+
+ assert.False(t, task.requiresASMSecret())
+}
+
+func TestGetAllASMSecretRequirements(t *testing.T) {
+ regionWest := "us-west-2"
+ regionEast := "us-east-1"
+
+ secret1 := apicontainer.Secret{
+ Provider: "asm",
+ Name: "secret1",
+ Region: regionWest,
+ ValueFrom: "/test/secretName1",
+ }
+
+ secret2 := apicontainer.Secret{
+ Provider: "asm",
+ Name: "secret2",
+ Region: regionWest,
+ ValueFrom: "/test/secretName2",
+ }
+
+ secret3 := apicontainer.Secret{
+ Provider: "ssm",
+ Name: "secret3",
+ Region: regionEast,
+ ValueFrom: "/test/secretName3",
+ }
+
+ secret4 := apicontainer.Secret{
+ Provider: "asm",
+ Name: "secret4",
+ Region: regionWest,
+ ValueFrom: "/test/secretName1",
+ }
+
+ container := &apicontainer.Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: []apicontainer.Secret{secret1, secret2, secret3, secret4},
+ TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
+ }
+
+ task := &Task{
+ Arn: "test",
+ ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource),
+ Containers: []*apicontainer.Container{container},
+ }
+
+ reqs := task.getAllASMSecretRequirements()
+ assert.Equal(t, secret1, reqs["/test/secretName1_us-west-2"])
+ assert.Equal(t, secret2, reqs["/test/secretName2_us-west-2"])
+ assert.Equal(t, 2, len(reqs))
+}
+
+func TestInitializeAndGetASMSecretResource(t *testing.T) {
+ secret := apicontainer.Secret{
+ Provider: "asm",
+ Name: "secret",
+ Region: "us-west-2",
+ ValueFrom: "/test/secretName",
+ }
+
+ container := &apicontainer.Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: []apicontainer.Secret{secret},
+ TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
+ }
+
+ container1 := &apicontainer.Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: nil,
+ TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
+ }
+
+ task := &Task{
+ Arn: "test",
+ ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource),
+ Containers: []*apicontainer.Container{container, container1},
+ }
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ credentialsManager := mock_credentials.NewMockManager(ctrl)
+ asmClientCreator := mock_factory.NewMockClientCreator(ctrl)
+
+ resFields := &taskresource.ResourceFields{
+ ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{
+ ASMClientCreator: asmClientCreator,
+ CredentialsManager: credentialsManager,
+ },
+ }
+
+ task.initializeASMSecretResource(credentialsManager, resFields)
+
+ resourceDep := apicontainer.ResourceDependency{
+ Name: asmsecret.ResourceName,
+ RequiredStatus: resourcestatus.ResourceStatus(asmsecret.ASMSecretCreated),
+ }
+
+ assert.Equal(t, resourceDep, task.Containers[0].TransitionDependenciesMap[apicontainerstatus.ContainerCreated].ResourceDependencies[0])
+ assert.Equal(t, 0, len(task.Containers[1].TransitionDependenciesMap))
+
+ _, ok := task.getASMSecretsResource()
+ assert.True(t, ok)
+}
+
+func TestPopulateSecretsAsEnv(t *testing.T) {
+ secret1 := apicontainer.Secret{
+ Provider: "ssm",
+ Name: "secret1",
+ Region: "us-west-2",
+ Type: "ENVIRONMENT_VARIABLE",
+ ValueFrom: "/test/secretName",
+ }
+
+ secret2 := apicontainer.Secret{
+ Provider: "asm",
+ Name: "secret2",
+ Region: "us-west-2",
+ Type: "ENVIRONMENT_VARIABLE",
+ ValueFrom: "arn:aws:secretsmanager:us-west-2:11111:secret:/test/secretName",
+ }
+
+ container := &apicontainer.Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: []apicontainer.Secret{secret1, secret2},
+ TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
+ }
+
+ task := &Task{
+ Arn: "test",
+ ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource),
+ Containers: []*apicontainer.Container{container},
+ }
+
+ ssmRes := &ssmsecret.SSMSecretResource{}
+ ssmRes.SetCachedSecretValue(secretKeyWest1, "secretValue1")
+
+ asmRes := &asmsecret.ASMSecretResource{}
+ asmRes.SetCachedSecretValue(asmSecretKeyWest1, "secretValue2")
+
+ task.AddResource(ssmsecret.ResourceName, ssmRes)
+ task.AddResource(asmsecret.ResourceName, asmRes)
+
+ task.PopulateSecretsAsEnv(container)
+ assert.Equal(t, "secretValue1", container.Environment["secret1"])
+ assert.Equal(t, "secretValue2", container.Environment["secret2"])
+}
+
+func TestPopulateSecretsAsEnvOnlySSM(t *testing.T) {
+ secret1 := apicontainer.Secret{
+ Provider: "asm",
+ Name: "secret1",
+ Region: "us-west-2",
+ Type: "MOUNT_POINT",
+ ValueFrom: "arn:aws:secretsmanager:us-west-2:11111:secret:/test/secretName",
+ }
+
+ secret2 := apicontainer.Secret{
+ Provider: "ssm",
+ Name: "secret2",
+ Region: "us-west-2",
+ Type: "ENVIRONMENT_VARIABLE",
+ ValueFrom: "/test/secretName",
+ }
+
+ container := &apicontainer.Container{
+ Name: "myName",
+ Image: "image:tag",
+ Secrets: []apicontainer.Secret{secret1, secret2},
+ TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
+ }
+
+ task := &Task{
+ Arn: "test",
+ ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource),
+ Containers: []*apicontainer.Container{container},
+ }
+
+ asmRes := &asmsecret.ASMSecretResource{}
+ asmRes.SetCachedSecretValue(asmSecretKeyWest1, "secretValue1")
+
+ ssmRes := &ssmsecret.SSMSecretResource{}
+ ssmRes.SetCachedSecretValue(secretKeyWest1, "secretValue2")
+
+ task.AddResource(ssmsecret.ResourceName, ssmRes)
+ task.AddResource(asmsecret.ResourceName, asmRes)
+
+ task.PopulateSecretsAsEnv(container)
+ assert.Equal(t, "secretValue2", container.Environment["secret2"])
+ assert.Equal(t, 1, len(container.Environment))
+}
diff --git a/agent/app/agent.go b/agent/app/agent.go
index 9e9a18a603e..2fcef486d0b 100644
--- a/agent/app/agent.go
+++ b/agent/app/agent.go
@@ -18,6 +18,8 @@ import (
"errors"
"fmt"
+ "github.com/aws/amazon-ecs-agent/agent/metrics"
+
acshandler "github.com/aws/amazon-ecs-agent/agent/acs/handler"
"github.com/aws/amazon-ecs-agent/agent/api"
"github.com/aws/amazon-ecs-agent/agent/api/ecsclient"
@@ -52,6 +54,7 @@ import (
aws_credentials "github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/cihub/seelog"
+ "github.com/pborman/uuid"
)
const (
@@ -107,6 +110,7 @@ type ecsAgent struct {
terminationHandler sighandlers.TerminationHandler
mobyPlugins mobypkgwrapper.Plugins
resourceFields *taskresource.ResourceFields
+ availabilityZone string
}
// newAgent returns a new ecsAgent object, but does not start anything
@@ -237,9 +241,11 @@ func (agent *ecsAgent) doStart(containerChangeEventStream *eventstream.EventStre
return exitcodes.ExitTerminal
}
+ agent.initMetricsEngine()
+
// Initialize the state manager
stateManager, err := agent.newStateManager(taskEngine,
-		&agent.cfg.Cluster, &agent.containerInstanceARN, &currentEC2InstanceID)
+		&agent.cfg.Cluster, &agent.containerInstanceARN, &currentEC2InstanceID, &agent.availabilityZone)
if err != nil {
seelog.Criticalf("Error creating state manager: %v", err)
return exitcodes.ExitTerminal
@@ -283,6 +289,7 @@ func (agent *ecsAgent) doStart(containerChangeEventStream *eventstream.EventStre
// Add container instance ARN to metadata manager
if agent.cfg.ContainerMetadataEnabled {
agent.metadataManager.SetContainerInstanceARN(agent.containerInstanceARN)
+ agent.metadataManager.SetAvailabilityZone(agent.availabilityZone)
}
// Begin listening to the docker daemon and saving changes
@@ -320,7 +327,7 @@ func (agent *ecsAgent) newTaskEngine(containerChangeEventStream *eventstream.Eve
}
// We try to set these values by loading the existing state file first
- var previousCluster, previousEC2InstanceID, previousContainerInstanceArn string
+ var previousCluster, previousEC2InstanceID, previousContainerInstanceArn, previousAZ string
previousTaskEngine := engine.NewTaskEngine(agent.cfg, agent.dockerClient,
credentialsManager, containerChangeEventStream, imageManager, state,
agent.metadataManager, agent.resourceFields)
@@ -328,7 +335,7 @@ func (agent *ecsAgent) newTaskEngine(containerChangeEventStream *eventstream.Eve
// previousStateManager is used to verify that our current runtime configuration is
// compatible with our past configuration as reflected by our state-file
previousStateManager, err := agent.newStateManager(previousTaskEngine, &previousCluster,
- &previousContainerInstanceArn, &previousEC2InstanceID)
+ &previousContainerInstanceArn, &previousEC2InstanceID, &previousAZ)
if err != nil {
seelog.Criticalf("Error creating state manager: %v", err)
return nil, "", err
@@ -371,6 +378,20 @@ func (agent *ecsAgent) newTaskEngine(containerChangeEventStream *eventstream.Eve
return previousTaskEngine, currentEC2InstanceID, nil
}
+func (agent *ecsAgent) initMetricsEngine() {
+ // In case of a panic during set-up, we will recover quietly and resume
+ // normal Agent execution.
+ defer func() {
+ if r := recover(); r != nil {
+ seelog.Errorf("MetricsEngine Set-up panicked. Recovering quietly: %s", r)
+ }
+ }()
+
+ // We init the global MetricsEngine before we publish metrics
+ metrics.MustInit(agent.cfg)
+ metrics.PublishMetrics()
+}
+
// setClusterInConfig sets the cluster name in the config object based on
// previous state. It returns an error if there's a mismatch between the
// the current cluster name with what's restored from the cluster state
@@ -412,7 +433,8 @@ func (agent *ecsAgent) newStateManager(
taskEngine engine.TaskEngine,
cluster *string,
containerInstanceArn *string,
- savedInstanceID *string) (statemanager.StateManager, error) {
+ savedInstanceID *string,
+ availabilityZone *string) (statemanager.StateManager, error) {
if !agent.cfg.Checkpoint {
return statemanager.NewNoopStateManager(), nil
@@ -426,6 +448,7 @@ func (agent *ecsAgent) newStateManager(
agent.saveableOptionFactory.AddSaveable("Cluster", cluster),
// This is for making testing easier as we can mock this
agent.saveableOptionFactory.AddSaveable("EC2InstanceID", savedInstanceID),
+ agent.saveableOptionFactory.AddSaveable("availabilityZone", availabilityZone),
)
}
@@ -476,11 +499,11 @@ func (agent *ecsAgent) registerContainerInstance(
if agent.containerInstanceARN != "" {
seelog.Infof("Restored from checkpoint file. I am running as '%s' in cluster '%s'", agent.containerInstanceARN, agent.cfg.Cluster)
- return agent.reregisterContainerInstance(client, capabilities, tags)
+ return agent.reregisterContainerInstance(client, capabilities, tags, uuid.New())
}
seelog.Info("Registering Instance with ECS")
- containerInstanceArn, err := client.RegisterContainerInstance("", capabilities, tags)
+ containerInstanceArn, availabilityZone, err := client.RegisterContainerInstance("", capabilities, tags, uuid.New())
if err != nil {
seelog.Errorf("Error registering: %v", err)
if retriable, ok := err.(apierrors.Retriable); ok && !retriable.Retry() {
@@ -498,6 +521,7 @@ func (agent *ecsAgent) registerContainerInstance(
}
seelog.Infof("Registration completed successfully. I am running as '%s' in cluster '%s'", containerInstanceArn, agent.cfg.Cluster)
agent.containerInstanceARN = containerInstanceArn
+ agent.availabilityZone = availabilityZone
// Save our shiny new containerInstanceArn
stateManager.Save()
return nil
@@ -507,8 +531,10 @@ func (agent *ecsAgent) registerContainerInstance(
// registered with ECS. This is for cases where the ECS Agent is being restored
// from a check point.
func (agent *ecsAgent) reregisterContainerInstance(client api.ECSClient,
- capabilities []*ecs.Attribute, tags []*ecs.Tag) error {
- _, err := client.RegisterContainerInstance(agent.containerInstanceARN, capabilities, tags)
+ capabilities []*ecs.Attribute, tags []*ecs.Tag, registrationToken string) error {
+ _, availabilityZone, err := client.RegisterContainerInstance(agent.containerInstanceARN, capabilities, tags, registrationToken)
+	// set the availability zone on the agent
+ agent.availabilityZone = availabilityZone
if err == nil {
return nil
@@ -550,7 +576,7 @@ func (agent *ecsAgent) startAsyncRoutines(
statsEngine := stats.NewDockerStatsEngine(agent.cfg, agent.dockerClient, containerChangeEventStream)
// Start serving the endpoint to fetch IAM Role credentials and other task metadata
- go handlers.ServeTaskHTTPEndpoint(credentialsManager, state, agent.containerInstanceARN, agent.cfg, statsEngine)
+ go handlers.ServeTaskHTTPEndpoint(credentialsManager, state, agent.containerInstanceARN, agent.cfg, statsEngine, agent.availabilityZone)
// Start sending events to the backend
go eventhandler.HandleEngineEvents(taskEngine, client, taskHandler)
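
The deferred `recover` in `initMetricsEngine` keeps a metrics set-up panic from taking down the whole agent. The same pattern in isolation, as a generic sketch (the helper name is made up):

```go
package main

import "log"

// safeInit runs init and swallows any panic so the caller keeps going;
// this mirrors the recover-and-continue pattern in initMetricsEngine.
func safeInit(name string, init func()) {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("%s set-up panicked, continuing without it: %v", name, r)
		}
	}()
	init()
}

func main() {
	safeInit("metrics", func() { panic("boom") })
	log.Println("agent continues")
}
```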
diff --git a/agent/app/agent_capability.go b/agent/app/agent_capability.go
index c234b61ac1f..db13eaddda6 100644
--- a/agent/app/agent_capability.go
+++ b/agent/app/agent_capability.go
@@ -38,7 +38,9 @@ const (
attributeSeparator = "."
capabilityPrivateRegistryAuthASM = "private-registry-authentication.secretsmanager"
capabilitySecretEnvSSM = "secrets.ssm.environment-variables"
+ capabilitySecretEnvASM = "secrets.asm.environment-variables"
capabiltyPIDAndIPCNamespaceSharing = "pid-ipc-namespace-sharing"
+ capabilityECREndpoint = "ecr-endpoint"
)
// capabilities returns the supported capabilities of this agent / docker-client pair.
@@ -68,6 +70,9 @@ const (
// ecs.capability.container-health-check
// ecs.capability.private-registry-authentication.secretsmanager
// ecs.capability.secrets.ssm.environment-variables
+// ecs.capability.pid-ipc-namespace-sharing
+// ecs.capability.ecr-endpoint
+// ecs.capability.secrets.asm.environment-variables
func (agent *ecsAgent) capabilities() ([]*ecs.Attribute, error) {
var capabilities []*ecs.Attribute
@@ -122,6 +127,12 @@ func (agent *ecsAgent) capabilities() ([]*ecs.Attribute, error) {
// with host EC2 instance and among containers within the task
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabiltyPIDAndIPCNamespaceSharing)
+ // support ecr endpoint override
+ capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityECREndpoint)
+
+ // ecs agent version 1.23.0 supports ecs secrets integration with aws secrets manager
+ capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilitySecretEnvASM)
+
return capabilities, nil
}
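
For context, `appendNameOnlyAttribute` wraps a bare capability name into an `ecs.Attribute` with no value; a sketch of that helper as assumed from its call sites here:

```go
// Assumed shape of the helper used above; the real implementation lives
// elsewhere in agent_capability.go.
func appendNameOnlyAttribute(attributes []*ecs.Attribute, name string) []*ecs.Attribute {
	return append(attributes, &ecs.Attribute{Name: aws.String(name)})
}
```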
diff --git a/agent/app/agent_capability_test.go b/agent/app/agent_capability_test.go
index 9bc44910d3e..e0b51bc612a 100644
--- a/agent/app/agent_capability_test.go
+++ b/agent/app/agent_capability_test.go
@@ -114,6 +114,15 @@ func TestCapabilities(t *testing.T) {
{
Name: aws.String(attributePrefix + capabilitySecretEnvSSM),
},
+ {
+ Name: aws.String(attributePrefix + capabiltyPIDAndIPCNamespaceSharing),
+ },
+ {
+ Name: aws.String(attributePrefix + capabilityECREndpoint),
+ },
+ {
+ Name: aws.String(attributePrefix + capabilitySecretEnvASM),
+ },
}...)
ctx, cancel := context.WithCancel(context.TODO())
diff --git a/agent/app/agent_compatibility_linux_test.go b/agent/app/agent_compatibility_linux_test.go
index f876099b939..99ae135142c 100644
--- a/agent/app/agent_compatibility_linux_test.go
+++ b/agent/app/agent_compatibility_linux_test.go
@@ -47,7 +47,7 @@ func TestCompatibilityEnabledSuccess(t *testing.T) {
gomock.InOrder(
saveableOptionFactory.EXPECT().AddSaveable(gomock.Any(), gomock.Any()).AnyTimes(),
- stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(stateManager, nil),
+ stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(stateManager, nil),
stateManager.EXPECT().Load().AnyTimes(),
state.EXPECT().AllTasks().Return([]*apitask.Task{}),
)
@@ -79,7 +79,7 @@ func TestCompatibilityDefaultEnabledFail(t *testing.T) {
}
gomock.InOrder(
saveableOptionFactory.EXPECT().AddSaveable(gomock.Any(), gomock.Any()).AnyTimes(),
- stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(stateManager, nil),
+ stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(stateManager, nil),
stateManager.EXPECT().Load().AnyTimes(),
state.EXPECT().AllTasks().Return(getTaskListWithOneBadTask()),
)
@@ -110,7 +110,7 @@ func TestCompatibilityExplicitlyEnabledFail(t *testing.T) {
}
gomock.InOrder(
saveableOptionFactory.EXPECT().AddSaveable(gomock.Any(), gomock.Any()).AnyTimes(),
- stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(stateManager, nil),
+ stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(stateManager, nil),
stateManager.EXPECT().Load().AnyTimes(),
state.EXPECT().AllTasks().Return(getTaskListWithOneBadTask()),
)
diff --git a/agent/app/agent_test.go b/agent/app/agent_test.go
index 6d8b6400b8b..129e9f1b346 100644
--- a/agent/app/agent_test.go
+++ b/agent/app/agent_test.go
@@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"sort"
+ "sync"
"testing"
apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors"
@@ -27,11 +28,14 @@ import (
"github.com/aws/amazon-ecs-agent/agent/app/factory/mocks"
app_mocks "github.com/aws/amazon-ecs-agent/agent/app/mocks"
"github.com/aws/amazon-ecs-agent/agent/config"
+ "github.com/aws/amazon-ecs-agent/agent/containermetadata/mocks"
"github.com/aws/amazon-ecs-agent/agent/credentials/mocks"
"github.com/aws/amazon-ecs-agent/agent/dockerclient"
+ "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi/mocks"
"github.com/aws/amazon-ecs-agent/agent/ec2/mocks"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
+ "github.com/aws/amazon-ecs-agent/agent/engine"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerstate/mocks"
"github.com/aws/amazon-ecs-agent/agent/engine/mocks"
"github.com/aws/amazon-ecs-agent/agent/eventstream"
@@ -49,6 +53,7 @@ import (
const (
clusterName = "some-cluster"
containerInstanceARN = "container-instance1"
+ availabilityZone = "us-west-2b"
)
var apiVersions = []dockerclient.DockerVersion{
@@ -103,7 +108,6 @@ func TestDoStartMinimumSupportedDockerVersionTerminal(t *testing.T) {
stateManagerFactory: stateManagerFactory,
saveableOptionFactory: saveableOptionFactory,
}
-
exitCode := agent.doStart(eventstream.NewEventStream("events", ctx),
credentialsManager, state, imageManager, client)
assert.Equal(t, exitcodes.ExitTerminal, exitCode)
@@ -146,11 +150,12 @@ func TestDoStartNewTaskEngineError(t *testing.T) {
saveableOptionFactory.EXPECT().AddSaveable("ContainerInstanceArn", gomock.Any()).Return(nil),
saveableOptionFactory.EXPECT().AddSaveable("Cluster", gomock.Any()).Return(nil),
saveableOptionFactory.EXPECT().AddSaveable("EC2InstanceID", gomock.Any()).Return(nil),
+ saveableOptionFactory.EXPECT().AddSaveable("availabilityZone", gomock.Any()).Return(nil),
+
// An error in creating the state manager should result in an
// error from newTaskEngine as well
- stateManagerFactory.EXPECT().NewStateManager(gomock.Any(),
- gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
- ).Return(
+ stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(), gomock.Any(),
+ gomock.Any(), gomock.Any(), gomock.Any()).Return(
nil, errors.New("error")),
)
@@ -184,16 +189,18 @@ func TestDoStartNewStateManagerError(t *testing.T) {
saveableOptionFactory.EXPECT().AddSaveable("ContainerInstanceArn", gomock.Any()).Return(nil),
saveableOptionFactory.EXPECT().AddSaveable("Cluster", gomock.Any()).Return(nil),
saveableOptionFactory.EXPECT().AddSaveable("EC2InstanceID", gomock.Any()).Return(nil),
- stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(),
- gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ saveableOptionFactory.EXPECT().AddSaveable("availabilityZone", gomock.Any()).Return(nil),
+ stateManagerFactory.EXPECT().NewStateManager(gomock.Any(),
+ gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
statemanager.NewNoopStateManager(), nil),
state.EXPECT().AllTasks().AnyTimes(),
ec2MetadataClient.EXPECT().InstanceID().Return(expectedInstanceID, nil),
saveableOptionFactory.EXPECT().AddSaveable("ContainerInstanceArn", gomock.Any()).Return(nil),
saveableOptionFactory.EXPECT().AddSaveable("Cluster", gomock.Any()).Return(nil),
saveableOptionFactory.EXPECT().AddSaveable("EC2InstanceID", gomock.Any()).Return(nil),
- stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(),
- gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ saveableOptionFactory.EXPECT().AddSaveable("availabilityZone", gomock.Any()).Return(nil),
+ stateManagerFactory.EXPECT().NewStateManager(gomock.Any(),
+ gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
nil, errors.New("error")),
)
@@ -232,8 +239,8 @@ func TestDoStartRegisterContainerInstanceErrorTerminal(t *testing.T) {
mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{""}, nil),
dockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(),
gomock.Any()).AnyTimes().Return([]string{}, nil),
- client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any()).Return(
- "", apierrors.NewAttributeError("error")),
+ client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ "", "", apierrors.NewAttributeError("error")),
)
cfg := getTestConfig()
@@ -269,8 +276,8 @@ func TestDoStartRegisterContainerInstanceErrorNonTerminal(t *testing.T) {
mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{""}, nil),
dockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(),
gomock.Any()).AnyTimes().Return([]string{}, nil),
- client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any()).Return(
- "", errors.New("error")),
+ client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ "", "", errors.New("error")),
)
cfg := getTestConfig()
@@ -290,6 +297,72 @@ func TestDoStartRegisterContainerInstanceErrorNonTerminal(t *testing.T) {
assert.Equal(t, exitcodes.ExitError, exitCode)
}
+func TestDoStartRegisterAvailabilityZone(t *testing.T) {
+ ctrl, credentialsManager, state, imageManager, client,
+ dockerClient, _, _ := setup(t)
+ defer ctrl.Finish()
+
+ var discoverEndpointsInvoked sync.WaitGroup
+ discoverEndpointsInvoked.Add(2)
+ mockMobyPlugins := mock_mobypkgwrapper.NewMockPlugins(ctrl)
+ dockerClient.EXPECT().Version(gomock.Any(), gomock.Any()).AnyTimes()
+ mockCredentialsProvider := app_mocks.NewMockProvider(ctrl)
+ containermetadata := mock_containermetadata.NewMockManager(ctrl)
+ imageManager.EXPECT().StartImageCleanupProcess(gomock.Any()).MaxTimes(1)
+ dockerClient.EXPECT().ListContainers(gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ dockerapi.ListContainersResponse{}).AnyTimes()
+ client.EXPECT().DiscoverPollEndpoint(gomock.Any()).Do(func(x interface{}) {
+ // Ensures that the test waits until the ACS session has been started
+ discoverEndpointsInvoked.Done()
+ }).Return("poll-endpoint", nil)
+ client.EXPECT().DiscoverPollEndpoint(gomock.Any()).Return("acs-endpoint", nil).AnyTimes()
+ client.EXPECT().DiscoverTelemetryEndpoint(gomock.Any()).Do(func(x interface{}) {
+ // Ensures that the test waits until the telemetry session has been started
+ discoverEndpointsInvoked.Done()
+ }).Return("telemetry-endpoint", nil)
+ client.EXPECT().DiscoverTelemetryEndpoint(gomock.Any()).Return(
+ "tele-endpoint", nil).AnyTimes()
+
+ gomock.InOrder(
+ dockerClient.EXPECT().SupportedVersions().Return(apiVersions),
+ mockCredentialsProvider.EXPECT().Retrieve().Return(aws_credentials.Value{}, nil),
+ dockerClient.EXPECT().SupportedVersions().Return(nil),
+ dockerClient.EXPECT().KnownVersions().Return(nil),
+ mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{""}, nil),
+ dockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(),
+ gomock.Any()).AnyTimes().Return([]string{}, nil),
+ client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ "arn:123", availabilityZone, nil),
+ containermetadata.EXPECT().SetContainerInstanceARN("arn:123"),
+ containermetadata.EXPECT().SetAvailabilityZone(availabilityZone),
+ imageManager.EXPECT().SetSaver(gomock.Any()),
+ dockerClient.EXPECT().ContainerEvents(gomock.Any()),
+ state.EXPECT().AllImageStates().Return(nil),
+ state.EXPECT().AllTasks().Return(nil),
+ )
+
+ cfg := getTestConfig()
+ cfg.ContainerMetadataEnabled = true
+ ctx, cancel := context.WithCancel(context.TODO())
+
+ // Cancel the context to cancel async routines
+ defer cancel()
+ agent := &ecsAgent{
+ ctx: ctx,
+ cfg: &cfg,
+ dockerClient: dockerClient,
+ credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider),
+ mobyPlugins: mockMobyPlugins,
+ metadataManager: containermetadata,
+ terminationHandler: func(saver statemanager.Saver, taskEngine engine.TaskEngine) {},
+ }
+
+ go agent.doStart(eventstream.NewEventStream("events", ctx),
+ credentialsManager, state, imageManager, client)
+
+ discoverEndpointsInvoked.Wait()
+}
+
func TestNewTaskEngineRestoreFromCheckpointNoEC2InstanceIDToLoadHappyPath(t *testing.T) {
ctrl, credentialsManager, state, imageManager, _,
dockerClient, stateManagerFactory, saveableOptionFactory := setup(t)
@@ -308,8 +381,9 @@ func TestNewTaskEngineRestoreFromCheckpointNoEC2InstanceIDToLoadHappyPath(t *tes
}).Return(nil),
saveableOptionFactory.EXPECT().AddSaveable("Cluster", gomock.Any()).Return(nil),
saveableOptionFactory.EXPECT().AddSaveable("EC2InstanceID", gomock.Any()).Return(nil),
- stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(),
- gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ saveableOptionFactory.EXPECT().AddSaveable("availabilityZone", gomock.Any()).Return(nil),
+ stateManagerFactory.EXPECT().NewStateManager(gomock.Any(),
+ gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
statemanager.NewNoopStateManager(), nil),
state.EXPECT().AllTasks().AnyTimes(),
ec2MetadataClient.EXPECT().InstanceID().Return(expectedInstanceID, nil),
@@ -358,8 +432,14 @@ func TestNewTaskEngineRestoreFromCheckpointPreviousEC2InstanceIDLoadedHappyPath(
assert.True(t, ok)
*previousEC2InstanceID = "inst-2"
}).Return(nil),
- stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(),
- gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ saveableOptionFactory.EXPECT().AddSaveable("availabilityZone", gomock.Any()).Do(
+ func(name string, saveable statemanager.Saveable) {
+ previousAZ, ok := saveable.(*string)
+ assert.True(t, ok)
+ *previousAZ = "us-west-2b"
+ }).Return(nil),
+ stateManagerFactory.EXPECT().NewStateManager(gomock.Any(),
+ gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
statemanager.NewNoopStateManager(), nil),
state.EXPECT().AllTasks().AnyTimes(),
ec2MetadataClient.EXPECT().InstanceID().Return(expectedInstanceID, nil),
@@ -383,6 +463,7 @@ func TestNewTaskEngineRestoreFromCheckpointPreviousEC2InstanceIDLoadedHappyPath(
assert.NoError(t, err)
assert.Equal(t, expectedInstanceID, instanceID)
assert.NotEqual(t, "prev-container-inst", agent.containerInstanceARN)
+ assert.NotEqual(t, "us-west-2b", agent.availabilityZone)
}
func TestNewTaskEngineRestoreFromCheckpointClusterIDMismatch(t *testing.T) {
@@ -410,8 +491,10 @@ func TestNewTaskEngineRestoreFromCheckpointClusterIDMismatch(t *testing.T) {
*previousCluster = clusterName
}).Return(nil),
saveableOptionFactory.EXPECT().AddSaveable("EC2InstanceID", gomock.Any()).Return(nil),
- stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(),
- gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ saveableOptionFactory.EXPECT().AddSaveable("availabilityZone", gomock.Any()).Return(nil),
+
+ stateManagerFactory.EXPECT().NewStateManager(gomock.Any(),
+ gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
statemanager.NewNoopStateManager(), nil),
state.EXPECT().AllTasks().AnyTimes(),
ec2MetadataClient.EXPECT().InstanceID().Return(ec2InstanceID, nil),
@@ -446,8 +529,9 @@ func TestNewTaskEngineRestoreFromCheckpointNewStateManagerError(t *testing.T) {
saveableOptionFactory.EXPECT().AddSaveable("ContainerInstanceArn", gomock.Any()).Return(nil),
saveableOptionFactory.EXPECT().AddSaveable("Cluster", gomock.Any()).Return(nil),
saveableOptionFactory.EXPECT().AddSaveable("EC2InstanceID", gomock.Any()).Return(nil),
- stateManagerFactory.EXPECT().NewStateManager(gomock.Any(), gomock.Any(),
- gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ saveableOptionFactory.EXPECT().AddSaveable("availabilityZone", gomock.Any()).Return(nil),
+ stateManagerFactory.EXPECT().NewStateManager(gomock.Any(),
+ gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
nil, errors.New("error")),
)
@@ -480,8 +564,9 @@ func TestNewTaskEngineRestoreFromCheckpointStateLoadError(t *testing.T) {
saveableOptionFactory.EXPECT().AddSaveable("ContainerInstanceArn", gomock.Any()).Return(nil),
saveableOptionFactory.EXPECT().AddSaveable("Cluster", gomock.Any()).Return(nil),
saveableOptionFactory.EXPECT().AddSaveable("EC2InstanceID", gomock.Any()).Return(nil),
+ saveableOptionFactory.EXPECT().AddSaveable("availabilityZone", gomock.Any()).Return(nil),
stateManagerFactory.EXPECT().NewStateManager(gomock.Any(),
- gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
+ gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
).Return(stateManager, nil),
stateManager.EXPECT().Load().Return(errors.New("error")),
)
@@ -516,8 +601,9 @@ func TestNewTaskEngineRestoreFromCheckpoint(t *testing.T) {
saveableOptionFactory.EXPECT().AddSaveable("ContainerInstanceArn", gomock.Any()).Return(nil),
saveableOptionFactory.EXPECT().AddSaveable("Cluster", gomock.Any()).Return(nil),
saveableOptionFactory.EXPECT().AddSaveable("EC2InstanceID", gomock.Any()).Return(nil),
+ saveableOptionFactory.EXPECT().AddSaveable("availabilityZone", gomock.Any()).Return(nil),
stateManagerFactory.EXPECT().NewStateManager(gomock.Any(),
- gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
+ gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
).Return(statemanager.NewNoopStateManager(), nil),
state.EXPECT().AllTasks().AnyTimes(),
ec2MetadataClient.EXPECT().InstanceID().Return(expectedInstanceID, nil),
@@ -590,7 +676,8 @@ func TestReregisterContainerInstanceHappyPath(t *testing.T) {
mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{""}, nil),
mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(),
gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil),
- client.EXPECT().RegisterContainerInstance(containerInstanceARN, gomock.Any(), gomock.Any()).Return(containerInstanceARN, nil),
+ client.EXPECT().RegisterContainerInstance(containerInstanceARN, gomock.Any(),
+ gomock.Any(), gomock.Any()).Return(containerInstanceARN, availabilityZone, nil),
)
cfg := getTestConfig()
cfg.Cluster = clusterName
@@ -605,6 +692,7 @@ func TestReregisterContainerInstanceHappyPath(t *testing.T) {
mobyPlugins: mockMobyPlugins,
}
agent.containerInstanceARN = containerInstanceARN
+ agent.availabilityZone = availabilityZone
err := agent.registerContainerInstance(stateManager, client, nil)
assert.NoError(t, err)
@@ -627,8 +715,8 @@ func TestReregisterContainerInstanceInstanceTypeChanged(t *testing.T) {
mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{""}, nil),
mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(),
gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil),
- client.EXPECT().RegisterContainerInstance(containerInstanceARN, gomock.Any(), gomock.Any()).Return(
- "", awserr.New("", apierrors.InstanceTypeChangedErrorMessage, errors.New(""))),
+ client.EXPECT().RegisterContainerInstance(containerInstanceARN, gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ "", "", awserr.New("", apierrors.InstanceTypeChangedErrorMessage, errors.New(""))),
)
cfg := getTestConfig()
@@ -644,6 +732,7 @@ func TestReregisterContainerInstanceInstanceTypeChanged(t *testing.T) {
mobyPlugins: mockMobyPlugins,
}
agent.containerInstanceARN = containerInstanceARN
+ agent.availabilityZone = availabilityZone
err := agent.registerContainerInstance(stateManager, client, nil)
assert.Error(t, err)
@@ -667,8 +756,8 @@ func TestReregisterContainerInstanceAttributeError(t *testing.T) {
mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil),
mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(),
gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil),
- client.EXPECT().RegisterContainerInstance(containerInstanceARN, gomock.Any(), gomock.Any()).Return(
- "", apierrors.NewAttributeError("error")),
+ client.EXPECT().RegisterContainerInstance(containerInstanceARN, gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ "", "", apierrors.NewAttributeError("error")),
)
cfg := getTestConfig()
@@ -684,6 +773,7 @@ func TestReregisterContainerInstanceAttributeError(t *testing.T) {
mobyPlugins: mockMobyPlugins,
}
agent.containerInstanceARN = containerInstanceARN
+ agent.availabilityZone = availabilityZone
err := agent.registerContainerInstance(stateManager, client, nil)
assert.Error(t, err)
@@ -707,8 +797,8 @@ func TestReregisterContainerInstanceNonTerminalError(t *testing.T) {
mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil),
mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(),
gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil),
- client.EXPECT().RegisterContainerInstance(containerInstanceARN, gomock.Any(), gomock.Any()).Return(
- "", errors.New("error")),
+ client.EXPECT().RegisterContainerInstance(containerInstanceARN, gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ "", "", errors.New("error")),
)
cfg := getTestConfig()
@@ -724,6 +814,7 @@ func TestReregisterContainerInstanceNonTerminalError(t *testing.T) {
mobyPlugins: mockMobyPlugins,
}
agent.containerInstanceARN = containerInstanceARN
+ agent.availabilityZone = availabilityZone
err := agent.registerContainerInstance(stateManager, client, nil)
assert.Error(t, err)
@@ -747,7 +838,7 @@ func TestRegisterContainerInstanceWhenContainerInstanceARNIsNotSetHappyPath(t *t
mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil),
mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(),
gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil),
- client.EXPECT().RegisterContainerInstance("", gomock.Any(), gomock.Any()).Return(containerInstanceARN, nil),
+ client.EXPECT().RegisterContainerInstance("", gomock.Any(), gomock.Any(), gomock.Any()).Return(containerInstanceARN, availabilityZone, nil),
stateManager.EXPECT().Save(),
)
@@ -763,10 +854,10 @@ func TestRegisterContainerInstanceWhenContainerInstanceARNIsNotSetHappyPath(t *t
credentialProvider: aws_credentials.NewCredentials(mockCredentialsProvider),
mobyPlugins: mockMobyPlugins,
}
-
err := agent.registerContainerInstance(stateManager, client, nil)
assert.NoError(t, err)
assert.Equal(t, containerInstanceARN, agent.containerInstanceARN)
+ assert.Equal(t, availabilityZone, agent.availabilityZone)
}
func TestRegisterContainerInstanceWhenContainerInstanceARNIsNotSetCanRetryError(t *testing.T) {
@@ -787,7 +878,7 @@ func TestRegisterContainerInstanceWhenContainerInstanceARNIsNotSetCanRetryError(
mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil),
mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(),
gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil),
- client.EXPECT().RegisterContainerInstance("", gomock.Any(), gomock.Any()).Return("", retriableError),
+ client.EXPECT().RegisterContainerInstance("", gomock.Any(), gomock.Any(), gomock.Any()).Return("", "", retriableError),
)
cfg := getTestConfig()
@@ -826,7 +917,7 @@ func TestRegisterContainerInstanceWhenContainerInstanceARNIsNotSetCannotRetryErr
mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil),
mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(),
gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil),
- client.EXPECT().RegisterContainerInstance("", gomock.Any(), gomock.Any()).Return("", cannotRetryError),
+ client.EXPECT().RegisterContainerInstance("", gomock.Any(), gomock.Any(), gomock.Any()).Return("", "", cannotRetryError),
)
cfg := getTestConfig()
@@ -864,8 +955,8 @@ func TestRegisterContainerInstanceWhenContainerInstanceARNIsNotSetAttributeError
mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil),
mockDockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(),
gomock.Any(), gomock.Any()).AnyTimes().Return([]string{}, nil),
- client.EXPECT().RegisterContainerInstance("", gomock.Any(), gomock.Any()).Return(
- "", apierrors.NewAttributeError("error")),
+ client.EXPECT().RegisterContainerInstance("", gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ "", "", apierrors.NewAttributeError("error")),
)
cfg := getTestConfig()
@@ -902,8 +993,8 @@ func TestRegisterContainerInstanceInvalidParameterTerminalError(t *testing.T) {
mockMobyPlugins.EXPECT().Scan().AnyTimes().Return([]string{}, nil),
dockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(),
gomock.Any()).AnyTimes().Return([]string{}, nil),
- client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any()).Return(
- "", awserr.New("InvalidParameterException", "", nil)),
+ client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
+ "", "", awserr.New("InvalidParameterException", "", nil)),
)
cfg := getTestConfig()
diff --git a/agent/app/agent_unix_test.go b/agent/app/agent_unix_test.go
index 6fa5e940c44..0c43b3cc316 100644
--- a/agent/app/agent_unix_test.go
+++ b/agent/app/agent_unix_test.go
@@ -94,7 +94,7 @@ func TestDoStartHappyPath(t *testing.T) {
mockMobyPlugins.EXPECT().Scan().Return([]string{}, nil),
dockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(),
gomock.Any()).Return([]string{}, nil),
- client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any()).Return("arn", nil),
+ client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return("arn", "", nil),
imageManager.EXPECT().SetSaver(gomock.Any()),
dockerClient.EXPECT().ContainerEvents(gomock.Any()).Return(containerChangeEvents, nil),
state.EXPECT().AllImageStates().Return(nil),
@@ -178,8 +178,8 @@ func TestDoStartTaskENIHappyPath(t *testing.T) {
mockMobyPlugins.EXPECT().Scan().Return([]string{}, nil),
dockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(),
gomock.Any()).Return([]string{}, nil),
- client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any()).Do(
- func(x interface{}, attributes []*ecs.Attribute, y interface{}) {
+ client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do(
+ func(x interface{}, attributes []*ecs.Attribute, y interface{}, z interface{}) {
vpcFound := false
subnetFound := false
for _, attribute := range attributes {
@@ -194,7 +194,7 @@ func TestDoStartTaskENIHappyPath(t *testing.T) {
}
assert.True(t, vpcFound)
assert.True(t, subnetFound)
- }).Return("arn", nil),
+ }).Return("arn", "", nil),
imageManager.EXPECT().SetSaver(gomock.Any()),
dockerClient.EXPECT().ContainerEvents(gomock.Any()).Return(containerChangeEvents, nil),
state.EXPECT().AllImageStates().Return(nil),
@@ -491,7 +491,7 @@ func TestDoStartCgroupInitHappyPath(t *testing.T) {
mockMobyPlugins.EXPECT().Scan().Return([]string{}, nil),
dockerClient.EXPECT().ListPluginsWithFilters(gomock.Any(), gomock.Any(), gomock.Any(),
gomock.Any()).Return([]string{}, nil),
- client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any()).Return("arn", nil),
+ client.EXPECT().RegisterContainerInstance(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return("arn", "", nil),
imageManager.EXPECT().SetSaver(gomock.Any()),
dockerClient.EXPECT().ContainerEvents(gomock.Any()).Return(containerChangeEvents, nil),
state.EXPECT().AllImageStates().Return(nil),
diff --git a/agent/asm/asm.go b/agent/asm/asm.go
index 1138f468bcf..98d6d907002 100644
--- a/agent/asm/asm.go
+++ b/agent/asm/asm.go
@@ -86,3 +86,18 @@ func extractASMValue(out *secretsmanager.GetSecretValueOutput) (types.AuthConfig
return dac, nil
}
+
+// GetSecretFromASM calls the AWS Secrets Manager API to retrieve the
+// plaintext value of the secret with the given secret ID
+func GetSecretFromASM(secretID string, client secretsmanageriface.SecretsManagerAPI) (string, error) {
+ in := &secretsmanager.GetSecretValueInput{
+ SecretId: aws.String(secretID),
+ }
+
+ out, err := client.GetSecretValue(in)
+ if err != nil {
+ return "", errors.Wrapf(err, "secret %s", secretID)
+ }
+
+ return aws.StringValue(out.SecretString), nil
+}
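
A hedged usage sketch for the new helper; the session and region wiring below is illustrative, not part of this change:

```go
package main

import (
	"fmt"

	"github.com/aws/amazon-ecs-agent/agent/asm"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	// Any type satisfying secretsmanageriface.SecretsManagerAPI works here,
	// which is what makes the helper easy to mock in tests.
	value, err := asm.GetSecretFromASM("my-secret-id", secretsmanager.New(sess))
	if err != nil {
		fmt.Println("retrieval failed:", err)
		return
	}
	fmt.Println("secret length:", len(value)) // avoid printing the value itself
}
```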
diff --git a/agent/asm/asm_test.go b/agent/asm/asm_test.go
index bdb501e5a2c..a78c01dd0db 100644
--- a/agent/asm/asm_test.go
+++ b/agent/asm/asm_test.go
@@ -109,3 +109,13 @@ func TestASMGetAuthConfig(t *testing.T) {
})
}
}
+
+func TestGetSecretFromASM(t *testing.T) {
+ asmClient := mockGetSecretValue{
+ Resp: secretsmanager.GetSecretValueOutput{
+ SecretString: aws.String("secretValue"),
+ },
+ }
+ _, err := GetSecretFromASM("secretName", asmClient)
+ assert.NoError(t, err)
+}
diff --git a/agent/config/config.go b/agent/config/config.go
index ba805d2e82f..a4557d8bb89 100644
--- a/agent/config/config.go
+++ b/agent/config/config.go
@@ -47,6 +47,9 @@ const (
// AgentCredentialsPort is used to serve the credentials for tasks.
AgentCredentialsPort = 51679
+ // AgentPrometheusExpositionPort is used to expose Prometheus metrics that can be scraped by a Prometheus server
+ AgentPrometheusExpositionPort = 51680
+
// defaultConfigFileName is the default (json-formatted) config file
defaultConfigFileName = "/etc/ecs_container_agent/config.json"
@@ -474,6 +477,7 @@ func environmentConfig() (Config, error) {
ImageCleanupInterval: parseEnvVariableDuration("ECS_IMAGE_CLEANUP_INTERVAL"),
NumImagesToDeletePerCycle: parseNumImagesToDeletePerCycle(),
ImagePullBehavior: parseImagePullBehavior(),
+ ImageCleanupExclusionList: parseImageCleanupExclusionList("ECS_IMAGE_CLEANUP_EXCLUDE"),
InstanceAttributes: instanceAttributes,
CNIPluginsPath: os.Getenv("ECS_CNI_PLUGINS_PATH"),
AWSVPCBlockInstanceMetdata: utils.ParseBool(os.Getenv("ECS_AWSVPC_BLOCK_IMDS"), false),
diff --git a/agent/config/config_test.go b/agent/config/config_test.go
index 2c7eadff663..12e8ed2c221 100644
--- a/agent/config/config_test.go
+++ b/agent/config/config_test.go
@@ -342,6 +342,13 @@ func TestInvalidFormatParseEnvVariableDuration(t *testing.T) {
assert.Zero(t, duration, "Expected 0 from parseEnvVariableDuration for invalid format")
}
+func TestValidForImagesCleanupExclusion(t *testing.T) {
+ defer setTestRegion()()
+ defer setTestEnv("ECS_IMAGE_CLEANUP_EXCLUDE", "amazonlinux:2,amazonlinux:3")()
+ imagesNotDelete := parseImageCleanupExclusionList("ECS_IMAGE_CLEANUP_EXCLUDE")
+ assert.Equal(t, []string{"amazonlinux:2", "amazonlinux:3"}, imagesNotDelete, "unexpected imageCleanupExclusionList")
+}
+
func TestValidFormatParseEnvVariableDuration(t *testing.T) {
defer setTestRegion()()
setTestEnv("FOO", "1s")
diff --git a/agent/config/config_unix.go b/agent/config/config_unix.go
index 9035502e220..d59ac8cfb1f 100644
--- a/agent/config/config_unix.go
+++ b/agent/config/config_unix.go
@@ -16,9 +16,11 @@ package config
import (
"fmt"
+ "os"
"time"
"github.com/aws/amazon-ecs-agent/agent/dockerclient"
+ "github.com/aws/amazon-ecs-agent/agent/utils"
)
const (
@@ -73,10 +75,16 @@ func DefaultConfig() Config {
SharedVolumeMatchFullConfig: false, // only requiring shared volumes to match on name, which is default docker behavior
ImagePullInactivityTimeout: defaultImagePullInactivityTimeout,
ContainerInstancePropagateTagsFrom: ContainerInstancePropagateTagsFromNoneType,
+ PrometheusMetricsEnabled: false,
}
}
-func (cfg *Config) platformOverrides() {}
+func (cfg *Config) platformOverrides() {
+ cfg.PrometheusMetricsEnabled = utils.ParseBool(os.Getenv("ECS_ENABLE_PROMETHEUS_METRICS"), false)
+ if cfg.PrometheusMetricsEnabled {
+ cfg.ReservedPorts = append(cfg.ReservedPorts, AgentPrometheusExpositionPort)
+ }
+}
// platformString returns platform-specific config data that can be serialized
// to string for debugging
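
The override is purely environment-driven; a sketch of the resulting behavior (port 51680 is AgentPrometheusExpositionPort from the config change above):

```go
// Illustrative restatement of platformOverrides, not additional agent code:
// ECS_ENABLE_PROMETHEUS_METRICS=true flips the flag and reserves the port.
enabled := utils.ParseBool(os.Getenv("ECS_ENABLE_PROMETHEUS_METRICS"), false)
if enabled {
	cfg.ReservedPorts = append(cfg.ReservedPorts, AgentPrometheusExpositionPort) // 51680
}
```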
diff --git a/agent/config/config_unix_test.go b/agent/config/config_unix_test.go
index cec8dc58f7d..61de9302f35 100644
--- a/agent/config/config_unix_test.go
+++ b/agent/config/config_unix_test.go
@@ -180,6 +180,17 @@ func TestBadFileContent(t *testing.T) {
assert.Error(t, err, "create configuration should fail")
}
+func TestPrometheusMetricsPlatformOverrides(t *testing.T) {
+ defer setTestRegion()()
+ cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
+ require.NoError(t, err)
+
+ defer setTestEnv("ECS_ENABLE_PROMETHEUS_METRICS", "true")()
+ cfg.platformOverrides()
+ assert.True(t, cfg.PrometheusMetricsEnabled, "Prometheus metrics should be enabled")
+ assert.Equal(t, 6, len(cfg.ReservedPorts), "Reserved ports should include the Prometheus exposition port")
+}
+
// setupFileConfiguration create a temp file store the configuration
func setupFileConfiguration(t *testing.T, configContent string) string {
file, err := ioutil.TempFile("", "ecs-test")
diff --git a/agent/config/parse.go b/agent/config/parse.go
index 3cfcf9aef50..1ca63290cc7 100644
--- a/agent/config/parse.go
+++ b/agent/config/parse.go
@@ -280,3 +280,17 @@ func parseEnvVariableDuration(envVar string) time.Duration {
}
return duration
}
+
+func parseImageCleanupExclusionList(envVar string) []string {
+ imageEnv := os.Getenv(envVar)
+ var imageCleanupExclusionList []string
+ if imageEnv == "" {
+ seelog.Debugf("Environment variable empty: %s", envVar)
+ } else {
+ imageCleanupExclusionList = strings.Split(imageEnv, ",")
+ }
+ for _, image := range imageCleanupExclusionList {
+ seelog.Infof("Image excluded from cleanup: %s", image)
+ }
+ return imageCleanupExclusionList
+}
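
The exclusion list is a plain comma-separated value; a short worked example of the parser's contract:

```go
// With ECS_IMAGE_CLEANUP_EXCLUDE="amazonlinux:2,amazonlinux:3" in the
// environment:
excluded := parseImageCleanupExclusionList("ECS_IMAGE_CLEANUP_EXCLUDE")
// excluded == []string{"amazonlinux:2", "amazonlinux:3"}
// With the variable unset, excluded is nil, which is safe to range over.
```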
diff --git a/agent/config/types.go b/agent/config/types.go
index 7d9b293cbf8..99f1d54398a 100644
--- a/agent/config/types.go
+++ b/agent/config/types.go
@@ -189,6 +189,11 @@ type Config struct {
// the image from the tarball; the referenced image must already be loaded.
PauseContainerTag string
+ // PrometheusMetricsEnabled configures whether agent metrics should be
+ // collected and exposed on the Prometheus exposition endpoint
+ // (AgentPrometheusExpositionPort). This is disabled by default.
+ PrometheusMetricsEnabled bool
+
// AWSVPCBlockInstanceMetdata specifies if InstanceMetadata endpoint should be blocked
// for tasks that are launched with network mode "awsvpc" when ECS_AWSVPC_BLOCK_IMDS=true
AWSVPCBlockInstanceMetdata bool
@@ -248,4 +253,7 @@ type Config struct {
// through RegisterContainerInstance call. Tags with the same keys from DescribeTags
// API call will be overridden.
ContainerInstanceTags map[string]string
+
+ // ImageCleanupExclusionList is the list of image names that customers want to keep for their own use, which automated image cleanup must never delete
+ ImageCleanupExclusionList []string
}
diff --git a/agent/containermetadata/manager.go b/agent/containermetadata/manager.go
index 6e3959d1095..1df53372ac2 100644
--- a/agent/containermetadata/manager.go
+++ b/agent/containermetadata/manager.go
@@ -41,6 +41,7 @@ const (
// operations
type Manager interface {
SetContainerInstanceARN(string)
+ SetAvailabilityZone(string)
Create(*dockercontainer.Config, *dockercontainer.HostConfig, *apitask.Task, string) error
Update(context.Context, string, *apitask.Task, string) error
Clean(string) error
@@ -65,6 +66,8 @@ type metadataManager struct {
osWrap oswrapper.OS
// ioutilWrap is a wrapper for 'ioutil' package operations
ioutilWrap ioutilwrapper.IOUtil
+ // availabilityZone is the availability zone of the container instance the task runs on
+ availabilityZone string
}
// NewManager creates a metadataManager for a given DockerTaskEngine settings.
@@ -85,6 +88,12 @@ func (manager *metadataManager) SetContainerInstanceARN(containerInstanceARN str
manager.containerInstanceARN = containerInstanceARN
}
+// SetAvailabilityZone sets the metadataManager's availabilityZone, which is not
+// available at creation time because it is only known after the agent registers the instance
+func (manager *metadataManager) SetAvailabilityZone(availabilityZone string) {
+ manager.availabilityZone = availabilityZone
+}
+
// Create creates the metadata file and adds the metadata directory to
// the container's mounted host volumes
// Pointer hostConfig is modified directly so there is risk of concurrency errors.
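
The setter pair exists because of two-phase initialization: the metadata manager is constructed at startup, before the instance is registered, so the ARN and zone are injected once RegisterContainerInstance returns. Sketch:

```go
var manager containermetadata.Manager // built at startup, fields still empty
// ... after RegisterContainerInstance succeeds:
manager.SetContainerInstanceARN(arn)
manager.SetAvailabilityZone(az) // flows into every metadata file written after this
```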
diff --git a/agent/containermetadata/manager_test.go b/agent/containermetadata/manager_test.go
index 6c3efdeb555..56e5c83e481 100644
--- a/agent/containermetadata/manager_test.go
+++ b/agent/containermetadata/manager_test.go
@@ -39,6 +39,7 @@ const (
taskDefinitionRevision = "8"
containerName = "container"
dataDir = "ecs_mockdata"
+ availabilityZone = "us-west-2b"
)
func managerSetup(t *testing.T) (*mock_containermetadata.MockDockerMetadataClient, *mock_ioutilwrapper.MockIOUtil, *mock_oswrapper.MockOS, *mock_oswrapper.MockFile, func()) {
@@ -62,6 +63,16 @@ func TestSetContainerInstanceARN(t *testing.T) {
assert.Equal(t, mockARN, newManager.containerInstanceARN)
}
+// TestSetAvailabilityZone checks whether the manager's availabilityZone is set correctly.
+func TestSetAvailabilityZone(t *testing.T) {
+ _, _, _, _, done := managerSetup(t)
+ defer done()
+ mockAvailabilityZone := availabilityZone
+ newManager := &metadataManager{}
+ newManager.SetAvailabilityZone(mockAvailabilityZone)
+ assert.Equal(t, mockAvailabilityZone, newManager.availabilityZone)
+}
+
// TestCreateMalformedFilepath checks case when taskARN is invalid resulting in an invalid file path
func TestCreateMalformedFilepath(t *testing.T) {
_, _, _, _, done := managerSetup(t)
diff --git a/agent/containermetadata/mocks/containermetadata_mocks.go b/agent/containermetadata/mocks/containermetadata_mocks.go
index 4d22f814f14..b1060ded4a6 100644
--- a/agent/containermetadata/mocks/containermetadata_mocks.go
+++ b/agent/containermetadata/mocks/containermetadata_mocks.go
@@ -75,6 +75,16 @@ func (mr *MockManagerMockRecorder) Create(arg0, arg1, arg2, arg3 interface{}) *g
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockManager)(nil).Create), arg0, arg1, arg2, arg3)
}
+// SetAvailabilityZone mocks base method
+func (m *MockManager) SetAvailabilityZone(arg0 string) {
+ m.ctrl.Call(m, "SetAvailabilityZone", arg0)
+}
+
+// SetAvailabilityZone indicates an expected call of SetAvailabilityZone
+func (mr *MockManagerMockRecorder) SetAvailabilityZone(arg0 interface{}) *gomock.Call {
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAvailabilityZone", reflect.TypeOf((*MockManager)(nil).SetAvailabilityZone), arg0)
+}
+
// SetContainerInstanceARN mocks base method
func (m *MockManager) SetContainerInstanceARN(arg0 string) {
m.ctrl.Call(m, "SetContainerInstanceARN", arg0)
diff --git a/agent/containermetadata/parse_metadata.go b/agent/containermetadata/parse_metadata.go
index ecbb499c40e..76cf2a3d524 100644
--- a/agent/containermetadata/parse_metadata.go
+++ b/agent/containermetadata/parse_metadata.go
@@ -40,6 +40,7 @@ func (manager *metadataManager) parseMetadataAtContainerCreate(task *apitask.Tas
},
containerInstanceARN: manager.containerInstanceARN,
metadataStatus: MetadataInitial,
+ availabilityZone: manager.availabilityZone,
}
}
@@ -60,6 +61,7 @@ func (manager *metadataManager) parseMetadata(dockerContainer *types.ContainerJS
dockerContainerMetadata: dockerMD,
containerInstanceARN: manager.containerInstanceARN,
metadataStatus: MetadataReady,
+ availabilityZone: manager.availabilityZone,
}
}
diff --git a/agent/containermetadata/parse_metadata_test.go b/agent/containermetadata/parse_metadata_test.go
index 8a8a3c009ba..ebed2d67579 100644
--- a/agent/containermetadata/parse_metadata_test.go
+++ b/agent/containermetadata/parse_metadata_test.go
@@ -40,12 +40,14 @@ func TestParseContainerCreate(t *testing.T) {
mockContainerName := containerName
mockCluster := cluster
mockContainerInstanceARN := containerInstanceARN
+ mockAvailabilityZone := availabilityZone
expectedStatus := string(MetadataInitial)
newManager := &metadataManager{
cluster: mockCluster,
containerInstanceARN: mockContainerInstanceARN,
+ availabilityZone: mockAvailabilityZone,
}
metadata := newManager.parseMetadataAtContainerCreate(mockTask, mockContainerName)
@@ -53,6 +55,7 @@ func TestParseContainerCreate(t *testing.T) {
assert.Equal(t, metadata.taskMetadata.containerName, mockContainerName, "Expected container name "+mockContainerName)
assert.Equal(t, metadata.taskMetadata.taskARN, mockTaskARN, "Expected task ARN "+mockTaskARN)
assert.Equal(t, metadata.containerInstanceARN, mockContainerInstanceARN, "Expected container instance ARN "+mockContainerInstanceARN)
+ assert.Equal(t, metadata.availabilityZone, mockAvailabilityZone, "Expected availabilityZone "+mockAvailabilityZone)
assert.Equal(t, metadata.taskMetadata.taskDefinitionFamily, mockTaskDefinitionFamily, "Expected task definition family "+mockTaskDefinitionFamily)
assert.Equal(t, metadata.taskMetadata.taskDefinitionRevision, mockTaskDefinitionRevision, "Expected task definition revision "+mockTaskDefinitionRevision)
assert.Equal(t, string(metadata.metadataStatus), expectedStatus, "Expected status "+expectedStatus)
@@ -64,12 +67,14 @@ func TestParseHasNoContainer(t *testing.T) {
mockContainerName := containerName
mockCluster := cluster
mockContainerInstanceARN := containerInstanceARN
+ mockAvailabilityZone := availabilityZone
expectedStatus := string(MetadataReady)
newManager := &metadataManager{
cluster: mockCluster,
containerInstanceARN: mockContainerInstanceARN,
+ availabilityZone: mockAvailabilityZone,
}
metadata := newManager.parseMetadata(nil, mockTask, mockContainerName)
@@ -77,6 +82,7 @@ func TestParseHasNoContainer(t *testing.T) {
assert.Equal(t, metadata.taskMetadata.containerName, mockContainerName, "Expected container name "+mockContainerName)
assert.Equal(t, metadata.taskMetadata.taskARN, mockTaskARN, "Expected task ARN "+mockTaskARN)
assert.Equal(t, metadata.containerInstanceARN, mockContainerInstanceARN, "Expected container instance ARN "+mockContainerInstanceARN)
+ assert.Equal(t, metadata.availabilityZone, mockAvailabilityZone, "Expected availabilityZone "+mockAvailabilityZone)
assert.Equal(t, string(metadata.metadataStatus), expectedStatus, "Expected status "+expectedStatus)
assert.Equal(t, metadata.dockerContainerMetadata.containerID, "", "Expected empty container metadata")
assert.Equal(t, metadata.dockerContainerMetadata.dockerContainerName, "", "Expected empty container metadata")
@@ -90,6 +96,7 @@ func TestParseHasConfig(t *testing.T) {
mockContainerName := containerName
mockCluster := cluster
mockContainerInstanceARN := containerInstanceARN
+ mockAvailabilityZone := availabilityZone
mockConfig := &dockercontainer.Config{Image: "image"}
@@ -106,6 +113,7 @@ func TestParseHasConfig(t *testing.T) {
newManager := &metadataManager{
cluster: mockCluster,
containerInstanceARN: mockContainerInstanceARN,
+ availabilityZone: mockAvailabilityZone,
}
metadata := newManager.parseMetadata(mockContainer, mockTask, mockContainerName)
@@ -114,6 +122,7 @@ func TestParseHasConfig(t *testing.T) {
assert.Equal(t, metadata.taskMetadata.containerName, mockContainerName, "Expected container name "+mockContainerName)
assert.Equal(t, metadata.taskMetadata.taskARN, mockTaskARN, "Expected task ARN "+mockTaskARN)
assert.Equal(t, metadata.containerInstanceARN, mockContainerInstanceARN, "Expected container instance ARN "+mockContainerInstanceARN)
+ assert.Equal(t, metadata.availabilityZone, mockAvailabilityZone, "Expected availabilityZone "+mockAvailabilityZone)
assert.Equal(t, string(metadata.metadataStatus), expectedStatus, "Expected status "+expectedStatus)
assert.Equal(t, metadata.dockerContainerMetadata.imageName, "image", "Expected nonempty imageID")
}
@@ -124,6 +133,7 @@ func TestParseHasNetworkSettingsPortBindings(t *testing.T) {
mockContainerName := containerName
mockCluster := cluster
mockContainerInstanceARN := containerInstanceARN
+ mockAvailabilityZone := availabilityZone
mockPorts := nat.PortMap{}
mockPortBinding := make([]nat.PortBinding, 0)
@@ -152,6 +162,7 @@ func TestParseHasNetworkSettingsPortBindings(t *testing.T) {
newManager := &metadataManager{
cluster: mockCluster,
containerInstanceARN: mockContainerInstanceARN,
+ availabilityZone: mockAvailabilityZone,
}
metadata := newManager.parseMetadata(mockContainer, mockTask, mockContainerName)
@@ -159,6 +170,7 @@ func TestParseHasNetworkSettingsPortBindings(t *testing.T) {
assert.Equal(t, metadata.taskMetadata.containerName, mockContainerName, "Expected container name "+mockContainerName)
assert.Equal(t, metadata.taskMetadata.taskARN, mockTaskARN, "Expected task ARN "+mockTaskARN)
assert.Equal(t, metadata.containerInstanceARN, mockContainerInstanceARN, "Expected container instance ARN "+mockContainerInstanceARN)
+ assert.Equal(t, metadata.availabilityZone, mockAvailabilityZone, "Expected availabilityZone "+mockAvailabilityZone)
assert.Equal(t, string(metadata.metadataStatus), expectedStatus, "Expected status "+expectedStatus)
assert.Equal(t, len(metadata.dockerContainerMetadata.networkInfo.networks), 2, "Expected two networks")
@@ -174,6 +186,7 @@ func TestParseHasNetworkSettingsNetworksEmpty(t *testing.T) {
mockContainerName := containerName
mockCluster := cluster
mockContainerInstanceARN := containerInstanceARN
+ mockAvailabilityZone := availabilityZone
mockHostConfig := &dockercontainer.HostConfig{NetworkMode: "bridge"}
mockNetworkSettings := &types.NetworkSettings{
@@ -192,6 +205,7 @@ func TestParseHasNetworkSettingsNetworksEmpty(t *testing.T) {
newManager := &metadataManager{
cluster: mockCluster,
containerInstanceARN: mockContainerInstanceARN,
+ availabilityZone: mockAvailabilityZone,
}
metadata := newManager.parseMetadata(mockContainer, mockTask, mockContainerName)
@@ -199,6 +213,7 @@ func TestParseHasNetworkSettingsNetworksEmpty(t *testing.T) {
assert.Equal(t, metadata.taskMetadata.containerName, mockContainerName, "Expected container name "+mockContainerName)
assert.Equal(t, metadata.taskMetadata.taskARN, mockTaskARN, "Expected task ARN "+mockTaskARN)
assert.Equal(t, metadata.containerInstanceARN, mockContainerInstanceARN, "Expected container instance ARN "+mockContainerInstanceARN)
+ assert.Equal(t, metadata.availabilityZone, mockAvailabilityZone, "Expected availabilityZone "+mockAvailabilityZone)
assert.Equal(t, string(metadata.metadataStatus), expectedStatus, "Expected status "+expectedStatus)
assert.Equal(t, len(metadata.dockerContainerMetadata.networkInfo.networks), 1, "Expected one network")
}
@@ -209,6 +224,7 @@ func TestParseHasNetworkSettingsNetworksNonEmpty(t *testing.T) {
mockContainerName := containerName
mockCluster := cluster
mockContainerInstanceARN := containerInstanceARN
+ mockAvailabilityZone := availabilityZone
mockHostConfig := &dockercontainer.HostConfig{NetworkMode: dockercontainer.NetworkMode("bridge")}
mockNetworks := map[string]*network.EndpointSettings{}
@@ -229,6 +245,7 @@ func TestParseHasNetworkSettingsNetworksNonEmpty(t *testing.T) {
newManager := &metadataManager{
cluster: mockCluster,
containerInstanceARN: mockContainerInstanceARN,
+ availabilityZone: mockAvailabilityZone,
}
metadata := newManager.parseMetadata(mockContainer, mockTask, mockContainerName)
@@ -236,6 +253,7 @@ func TestParseHasNetworkSettingsNetworksNonEmpty(t *testing.T) {
assert.Equal(t, metadata.taskMetadata.containerName, mockContainerName, "Expected container name "+mockContainerName)
assert.Equal(t, metadata.taskMetadata.taskARN, mockTaskARN, "Expected task ARN "+mockTaskARN)
assert.Equal(t, metadata.containerInstanceARN, mockContainerInstanceARN, "Expected container instance ARN "+mockContainerInstanceARN)
+ assert.Equal(t, metadata.availabilityZone, mockAvailabilityZone, "Expected availabilityZone "+mockAvailabilityZone)
assert.Equal(t, string(metadata.metadataStatus), expectedStatus, "Expected status "+expectedStatus)
assert.Equal(t, len(metadata.dockerContainerMetadata.networkInfo.networks), 2, "Expected two networks")
}
@@ -280,6 +298,7 @@ func TestParseTaskDefinitionSettings(t *testing.T) {
mockContainerName := containerName
mockCluster := cluster
mockContainerInstanceARN := containerInstanceARN
+ mockAvailabilityZone := availabilityZone
mockHostConfig := &dockercontainer.HostConfig{NetworkMode: dockercontainer.NetworkMode("bridge")}
mockConfig := &dockercontainer.Config{Image: "image"}
@@ -301,6 +320,7 @@ func TestParseTaskDefinitionSettings(t *testing.T) {
newManager := &metadataManager{
cluster: mockCluster,
containerInstanceARN: mockContainerInstanceARN,
+ availabilityZone: mockAvailabilityZone,
}
metadata := newManager.parseMetadata(mockContainer, mockTask, mockContainerName)
@@ -310,6 +330,7 @@ func TestParseTaskDefinitionSettings(t *testing.T) {
assert.Equal(t, metadata.taskMetadata.taskDefinitionFamily, "", "Expected no task definition family")
assert.Equal(t, metadata.taskMetadata.taskDefinitionRevision, "", "Expected no task definition revision")
assert.Equal(t, metadata.containerInstanceARN, mockContainerInstanceARN, "Expected container instance ARN "+mockContainerInstanceARN)
+ assert.Equal(t, metadata.availabilityZone, mockAvailabilityZone, "Expected availabilityZone "+mockAvailabilityZone)
assert.Equal(t, string(metadata.metadataStatus), expectedStatus, "Expected status "+expectedStatus)
// now add the task definition details
diff --git a/agent/containermetadata/types.go b/agent/containermetadata/types.go
index e37fc22f797..a9136e9516c 100644
--- a/agent/containermetadata/types.go
+++ b/agent/containermetadata/types.go
@@ -129,6 +129,7 @@ type Metadata struct {
dockerContainerMetadata DockerContainerMetadata
containerInstanceARN string
metadataStatus MetadataStatus
+ availabilityZone string
}
// metadataSerializer is an intermediate struct that converts the information
@@ -147,6 +148,7 @@ type metadataSerializer struct {
Ports []apicontainer.PortBinding `json:"PortMappings,omitempty"`
Networks []Network `json:"Networks,omitempty"`
MetadataFileStatus MetadataStatus `json:"MetadataFileStatus,omitempty"`
+ AvailabilityZone string `json:"AvailabilityZone,omitempty"`
}
func (m Metadata) MarshalJSON() ([]byte, error) {
@@ -165,5 +167,6 @@ func (m Metadata) MarshalJSON() ([]byte, error) {
Ports: m.dockerContainerMetadata.ports,
Networks: m.dockerContainerMetadata.networkInfo.networks,
MetadataFileStatus: m.metadataStatus,
+ AvailabilityZone: m.availabilityZone,
})
}
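
With the serializer change, a metadata file written after registration carries the zone alongside the existing fields; illustrative (truncated) output, noting that the omitempty tag keeps the field absent until the zone is known:

```json
{
  "Cluster": "some-cluster",
  "ContainerInstanceARN": "arn:aws:ecs:us-west-2:123456789012:container-instance/...",
  "MetadataFileStatus": "READY",
  "AvailabilityZone": "us-west-2b"
}
```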
diff --git a/agent/dockerclient/dockerapi/docker_client.go b/agent/dockerclient/dockerapi/docker_client.go
index 7e54275ec7c..7e8646ad5be 100644
--- a/agent/dockerclient/dockerapi/docker_client.go
+++ b/agent/dockerclient/dockerapi/docker_client.go
@@ -37,6 +37,7 @@ import (
"github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory"
"github.com/aws/amazon-ecs-agent/agent/ecr"
+ "github.com/aws/amazon-ecs-agent/agent/metrics"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
@@ -319,7 +320,7 @@ func (dg *dockerGoClient) PullImage(image string, authData *apicontainer.Registr
timeout := dg.time().After(pullImageTimeout)
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
-
+ defer metrics.MetricsEngineGlobal.RecordDockerMetric("PULL_IMAGE")()
response := make(chan DockerContainerMetadata, 1)
go func() {
imagePullBackoff := utils.NewSimpleBackoff(minimumPullRetryDelay,
@@ -482,6 +483,7 @@ func getRepository(image string) string {
}
func (dg *dockerGoClient) InspectImage(image string) (*types.ImageInspect, error) {
+ defer metrics.MetricsEngineGlobal.RecordDockerMetric("INSPECT_IMAGE")()
client, err := dg.sdkDockerClient()
if err != nil {
return nil, err
@@ -520,7 +522,7 @@ func (dg *dockerGoClient) CreateContainer(ctx context.Context,
timeout time.Duration) DockerContainerMetadata {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
-
+ defer metrics.MetricsEngineGlobal.RecordDockerMetric("CREATE_CONTAINER")()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan DockerContainerMetadata, 1)
@@ -564,7 +566,7 @@ func (dg *dockerGoClient) createContainer(ctx context.Context,
func (dg *dockerGoClient) StartContainer(ctx context.Context, id string, timeout time.Duration) DockerContainerMetadata {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
-
+ defer metrics.MetricsEngineGlobal.RecordDockerMetric("START_CONTAINER")()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan DockerContainerMetadata, 1)
@@ -632,7 +634,7 @@ func (dg *dockerGoClient) InspectContainer(ctx context.Context, dockerID string,
}
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
-
+ defer metrics.MetricsEngineGlobal.RecordDockerMetric("INSPECT_CONTAINER")()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan inspectResponse, 1)
@@ -667,7 +669,7 @@ func (dg *dockerGoClient) inspectContainer(ctx context.Context, dockerID string)
func (dg *dockerGoClient) StopContainer(ctx context.Context, dockerID string, timeout time.Duration) DockerContainerMetadata {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
-
+ defer metrics.MetricsEngineGlobal.RecordDockerMetric("STOP_CONTAINER")()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan DockerContainerMetadata, 1)
@@ -709,7 +711,7 @@ func (dg *dockerGoClient) stopContainer(ctx context.Context, dockerID string) Do
func (dg *dockerGoClient) RemoveContainer(ctx context.Context, dockerID string, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
-
+ defer metrics.MetricsEngineGlobal.RecordDockerMetric("REMOVE_CONTAINER")()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan error, 1)
@@ -943,8 +945,8 @@ func (dg *dockerGoClient) handleContainerEvents(ctx context.Context,
metadata := dg.containerMetadata(ctx, containerID)
changedContainers <- DockerContainerChangeEvent{
- Status: status,
- Type: eventType,
+ Status: status,
+ Type: eventType,
DockerContainerMetadata: metadata,
}
}
@@ -1048,7 +1050,7 @@ func (dg *dockerGoClient) CreateVolume(ctx context.Context, name string,
timeout time.Duration) SDKVolumeResponse {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
-
+ defer metrics.MetricsEngineGlobal.RecordDockerMetric("CREATE_VOLUME")()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan SDKVolumeResponse, 1)
@@ -1098,7 +1100,7 @@ func (dg *dockerGoClient) createVolume(ctx context.Context,
func (dg *dockerGoClient) InspectVolume(ctx context.Context, name string, timeout time.Duration) SDKVolumeResponse {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
-
+ defer metrics.MetricsEngineGlobal.RecordDockerMetric("INSPECT_VOLUME")()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan SDKVolumeResponse, 1)
@@ -1140,7 +1142,7 @@ func (dg *dockerGoClient) inspectVolume(ctx context.Context, name string) SDKVol
func (dg *dockerGoClient) RemoveVolume(ctx context.Context, name string, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
-
+ defer metrics.MetricsEngineGlobal.RecordDockerMetric("REMOVE_VOLUME")()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan error, 1)
@@ -1327,7 +1329,7 @@ func (dg *dockerGoClient) removeImage(ctx context.Context, imageName string) err
func (dg *dockerGoClient) LoadImage(ctx context.Context, inputStream io.Reader, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
-
+ defer metrics.MetricsEngineGlobal.RecordDockerMetric("LOAD_IMAGE")()
response := make(chan error, 1)
go func() {
response <- dg.loadImage(ctx, inputStream)
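
All of the RecordDockerMetric call sites above use the `defer f(...)()` idiom, which implies the recorder returns a stop function; a minimal sketch of that shape (assumed here, since the metrics engine itself is not part of this hunk):

```go
// Hypothetical shape, for exposition only. The outer call starts a timer;
// the returned closure, invoked by defer on function exit, records the
// elapsed wall time under the given Docker API call name.
func (engine *MetricsEngine) RecordDockerMetric(callName string) func() {
	start := time.Now()
	return func() {
		engine.observeDuration(callName, time.Since(start)) // hypothetical helper
	}
}
```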
diff --git a/agent/ec2/ec2_client.go b/agent/ec2/ec2_client.go
index 909c0bc9a23..3bd372f9395 100644
--- a/agent/ec2/ec2_client.go
+++ b/agent/ec2/ec2_client.go
@@ -14,6 +14,8 @@
package ec2
import (
+ "strings"
+
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
@@ -30,6 +32,7 @@ const (
ResourceIDFilterName = "resource-id"
ResourceTypeFilterName = "resource-type"
ResourceTypeFilterValueInstance = "instance"
+ awsTagPrefix = "aws:"
)
type Client interface {
@@ -87,10 +90,14 @@ func (c *ClientImpl) DescribeECSTagsForInstance(instanceID string) ([]*ecs.Tag,
var tags []*ecs.Tag
// Convert ec2 tags to ecs tags
for _, ec2Tag := range res.Tags {
- tags = append(tags, &ecs.Tag{
- Key: ec2Tag.Key,
- Value: ec2Tag.Value,
- })
+ // Filter out all tags whose key or value carries the "aws:" prefix (case-insensitive)
+ if !strings.HasPrefix(strings.ToLower(aws.StringValue(ec2Tag.Key)), awsTagPrefix) &&
+ !strings.HasPrefix(strings.ToLower(aws.StringValue(ec2Tag.Value)), awsTagPrefix) {
+ tags = append(tags, &ecs.Tag{
+ Key: ec2Tag.Key,
+ Value: ec2Tag.Value,
+ })
+ }
}
return tags, nil
}
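
A self-contained worked example of the new filter's behavior (case-insensitive, applied to both key and value), matching the cases the updated test adds below:

```go
package main

import (
	"fmt"
	"strings"
)

// keep mirrors the predicate in DescribeECSTagsForInstance above.
func keep(key, value string) bool {
	return !strings.HasPrefix(strings.ToLower(key), "aws:") &&
		!strings.HasPrefix(strings.ToLower(value), "aws:")
}

func main() {
	fmt.Println(keep("key", "value"))         // true  -> propagated
	fmt.Println(keep("aws:key", "aws:value")) // false -> dropped
	fmt.Println(keep("aWS:key", "value"))     // false -> dropped (case-insensitive)
	fmt.Println(keep("key", "Aws:value"))     // false -> dropped
}
```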
diff --git a/agent/ec2/ec2_client_test.go b/agent/ec2/ec2_client_test.go
index 15101d9852c..d9504677a57 100644
--- a/agent/ec2/ec2_client_test.go
+++ b/agent/ec2/ec2_client_test.go
@@ -59,6 +59,18 @@ func TestDescribeECSTagsForInstance(t *testing.T) {
Key: aws.String("key"),
Value: aws.String("value"),
},
+ {
+ Key: aws.String("aws:key"),
+ Value: aws.String("aws:value"),
+ },
+ {
+ Key: aws.String("aWS:key"),
+ Value: aws.String("value"),
+ },
+ {
+ Key: aws.String("key"),
+ Value: aws.String("Aws:value"),
+ },
},
}
diff --git a/agent/ecr/factory.go b/agent/ecr/factory.go
index 865126d9d88..cfe9a8617be 100644
--- a/agent/ecr/factory.go
+++ b/agent/ecr/factory.go
@@ -1,4 +1,4 @@
-// Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
@@ -50,14 +50,24 @@ func NewECRFactory(acceptInsecureCert bool) ECRFactory {
// GetClient creates the ECR SDK client based on the authdata
func (factory *ecrFactory) GetClient(authData *apicontainer.ECRAuthData) (ECRClient, error) {
- cfg := aws.NewConfig().WithRegion(authData.Region).WithHTTPClient(factory.httpClient)
+ clientConfig, err := getClientConfig(factory.httpClient, authData)
+ if err != nil {
+ return &ecrClient{}, err
+ }
+
+ return factory.newClient(clientConfig), nil
+}
+
+// getClientConfig returns the config for the ecr client based on authData
+func getClientConfig(httpClient *http.Client, authData *apicontainer.ECRAuthData) (*aws.Config, error) {
+ cfg := aws.NewConfig().WithRegion(authData.Region).WithHTTPClient(httpClient)
if authData.EndpointOverride != "" {
- cfg.Region = aws.String(authData.EndpointOverride)
+ cfg.Endpoint = aws.String(authData.EndpointOverride)
}
if authData.UseExecutionRole {
if authData.GetPullCredentials() == (credentials.IAMRoleCredentials{}) {
- return &ecrClient{}, fmt.Errorf("container uses execution credentials, but the credentials are empty")
+ return nil, fmt.Errorf("container uses execution credentials, but the credentials are empty")
}
creds := awscreds.NewStaticCredentials(authData.GetPullCredentials().AccessKeyID,
authData.GetPullCredentials().SecretAccessKey,
@@ -65,7 +75,7 @@ func (factory *ecrFactory) GetClient(authData *apicontainer.ECRAuthData) (ECRCli
cfg = cfg.WithCredentials(creds)
}
- return factory.newClient(cfg), nil
+ return cfg, nil
}
func (factory *ecrFactory) newClient(cfg *aws.Config) ECRClient {
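
Splitting `getClientConfig` out of `GetClient` makes the endpoint-override fix independently testable: the override now lands in `cfg.Endpoint` instead of clobbering `cfg.Region`. A short illustration of the distinction using the aws-sdk-go config type (the endpoint value is a hypothetical example):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// The region and the endpoint are distinct settings; the fix above
	// stops the endpoint override from overwriting cfg.Region.
	cfg := aws.NewConfig().WithRegion("us-west-2")
	cfg.Endpoint = aws.String("ecr.us-west-2.amazonaws.com") // hypothetical override
	fmt.Println(aws.StringValue(cfg.Region), aws.StringValue(cfg.Endpoint))
}
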
diff --git a/agent/ecr/factory_test.go b/agent/ecr/factory_test.go
new file mode 100644
index 00000000000..a536eeb1e97
--- /dev/null
+++ b/agent/ecr/factory_test.go
@@ -0,0 +1,37 @@
+// +build unit
+
+// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package ecr
+
+import (
+ "testing"
+
+ apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGetClientConfigEndpointOverride(t *testing.T) {
+ testAuthData := &apicontainer.ECRAuthData{
+ EndpointOverride: "ecr.us-west-2.amazonaws.com",
+ Region: "us-west-2",
+ UseExecutionRole: false,
+ }
+
+ cfg, err := getClientConfig(nil, testAuthData)
+
+ assert.Nil(t, err)
+ assert.Equal(t, testAuthData.EndpointOverride, *cfg.Endpoint)
+}
+
diff --git a/agent/ecs_client/model/api/api-2.json b/agent/ecs_client/model/api/api-2.json
index ba108a65099..f758c0ace06 100644
--- a/agent/ecs_client/model/api/api-2.json
+++ b/agent/ecs_client/model/api/api-2.json
@@ -734,6 +734,7 @@
"mountPoints":{"shape":"MountPointList"},
"volumesFrom":{"shape":"VolumeFromList"},
"linuxParameters":{"shape":"LinuxParameters"},
+ "secrets":{"shape":"SecretList"},
"hostname":{"shape":"String"},
"user":{"shape":"String"},
"workingDirectory":{"shape":"String"},
@@ -773,7 +774,10 @@
"agentUpdateStatus":{"shape":"AgentUpdateStatus"},
"attributes":{"shape":"Attributes"},
"registeredAt":{"shape":"Timestamp"},
- "attachments":{"shape":"Attachments"}
+ "attachments":{"shape":"Attachments"},
+ "clientToken":{
+ "shape":"String"
+ }
}
},
"ContainerInstanceStatus":{
@@ -1557,7 +1561,10 @@
"versionInfo":{"shape":"VersionInfo"},
"containerInstanceArn":{"shape":"String"},
"attributes":{"shape":"Attributes"},
- "tags":{"shape":"Tags"}
+ "tags":{"shape":"Tags"},
+ "clientToken":{
+ "shape":"String"
+ }
}
},
"RegisterContainerInstanceResponse":{
@@ -1658,6 +1665,21 @@
"DAEMON"
]
},
+ "Secret":{
+ "type":"structure",
+ "required":[
+ "name",
+ "valueFrom"
+ ],
+ "members":{
+ "name":{"shape":"String"},
+ "valueFrom":{"shape":"String"}
+ }
+ },
+ "SecretList":{
+ "type":"list",
+ "member":{"shape":"Secret"}
+ },
"ServerException":{
"type":"structure",
"members":{
@@ -2144,4 +2166,4 @@
"member":{"shape":"Volume"}
}
}
-}
+}
\ No newline at end of file
diff --git a/agent/ecs_client/model/api/docs-2.json b/agent/ecs_client/model/api/docs-2.json
index 6b1190e975d..782bf92ee85 100644
--- a/agent/ecs_client/model/api/docs-2.json
+++ b/agent/ecs_client/model/api/docs-2.json
@@ -954,6 +954,18 @@
"Service$schedulingStrategy": "
The scheduling strategy to use for the service. For more information, see Services.
There are two service scheduler strategies available:
-
REPLICA
-The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions.
-
DAEMON
-The daemon scheduling strategy deploys exactly one task on each container instance in your cluster. When using this strategy, do not specify a desired number of tasks or any task placement strategies.
Fargate tasks do not support the DAEMON
scheduling strategy.
"
}
},
+ "Secret": {
+ "base": null,
+ "refs": {
+ "SecretList$member": null
+ }
+ },
+ "SecretList": {
+ "base": null,
+ "refs": {
+ "ContainerDefinition$secrets": null
+ }
+ },
"ServerException": {
"base": "These errors are usually caused by a server issue.
",
"refs": {
@@ -1162,6 +1174,8 @@
"RunTaskRequest$startedBy": "An optional tag specified when a task is started. For example if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy
parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy
value. Up to 36 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.
If a task is started by an Amazon ECS service, then the startedBy
parameter contains the deployment ID of the service that starts it.
",
"RunTaskRequest$group": "The name of the task group to associate with the task. The default value is the family name of the task definition (for example, family:my-family-name).
",
"RunTaskRequest$platformVersion": "The platform version on which to run your task. If one is not specified, the latest version is used by default.
",
+ "Secret$name": null,
+ "Secret$valueFrom": null,
"ServerException$message": null,
"Service$serviceArn": "The ARN that identifies the service. The ARN contains the arn:aws:ecs
namespace, followed by the Region of the service, the AWS account ID of the service owner, the service
namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service
.
",
"Service$serviceName": "The name of your service. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a Region or across multiple Regions.
",
diff --git a/agent/ecs_client/model/ecs/api.go b/agent/ecs_client/model/ecs/api.go
index 9bac3800f87..27803f25769 100644
--- a/agent/ecs_client/model/ecs/api.go
+++ b/agent/ecs_client/model/ecs/api.go
@@ -4566,6 +4566,8 @@ type ContainerDefinition struct {
// The private repository authentication credentials to use.
RepositoryCredentials *RepositoryCredentials `locationName:"repositoryCredentials" type:"structure"`
+ Secrets []*Secret `locationName:"secrets" type:"list"`
+
SystemControls []*SystemControl `locationName:"systemControls" type:"list"`
// A list of ulimits to set in the container. This parameter maps to Ulimits
@@ -4645,6 +4647,16 @@ func (s *ContainerDefinition) Validate() error {
invalidParams.AddNested("RepositoryCredentials", err.(request.ErrInvalidParams))
}
}
+ if s.Secrets != nil {
+ for i, v := range s.Secrets {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Secrets", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
if s.Ulimits != nil {
for i, v := range s.Ulimits {
if v == nil {
@@ -4824,6 +4836,12 @@ func (s *ContainerDefinition) SetRepositoryCredentials(v *RepositoryCredentials)
return s
}
+// SetSecrets sets the Secrets field's value.
+func (s *ContainerDefinition) SetSecrets(v []*Secret) *ContainerDefinition {
+ s.Secrets = v
+ return s
+}
+
// SetSystemControls sets the SystemControls field's value.
func (s *ContainerDefinition) SetSystemControls(v []*SystemControl) *ContainerDefinition {
s.SystemControls = v
@@ -4875,6 +4893,8 @@ type ContainerInstance struct {
// agent at instance registration or manually with the PutAttributes operation.
Attributes []*Attribute `locationName:"attributes" type:"list"`
+ ClientToken *string `locationName:"clientToken" type:"string"`
+
// The Amazon Resource Name (ARN) of the container instance. The ARN contains
// the arn:aws:ecs namespace, followed by the Region of the container instance,
// the AWS account ID of the container instance owner, the container-instance
@@ -4967,6 +4987,12 @@ func (s *ContainerInstance) SetAttributes(v []*Attribute) *ContainerInstance {
return s
}
+// SetClientToken sets the ClientToken field's value.
+func (s *ContainerInstance) SetClientToken(v string) *ContainerInstance {
+ s.ClientToken = &v
+ return s
+}
+
// SetContainerInstanceArn sets the ContainerInstanceArn field's value.
func (s *ContainerInstance) SetContainerInstanceArn(v string) *ContainerInstance {
s.ContainerInstanceArn = &v
@@ -8674,6 +8700,8 @@ type RegisterContainerInstanceInput struct {
// The container instance attributes that this container instance supports.
Attributes []*Attribute `locationName:"attributes" type:"list"`
+ ClientToken *string `locationName:"clientToken" type:"string"`
+
// The short name or full Amazon Resource Name (ARN) of the cluster with which
// to register your container instance. If you do not specify a cluster, the
// default cluster is assumed.
@@ -8747,6 +8775,12 @@ func (s *RegisterContainerInstanceInput) SetAttributes(v []*Attribute) *Register
return s
}
+// SetClientToken sets the ClientToken field's value.
+func (s *RegisterContainerInstanceInput) SetClientToken(v string) *RegisterContainerInstanceInput {
+ s.ClientToken = &v
+ return s
+}
+
// SetCluster sets the Cluster field's value.
func (s *RegisterContainerInstanceInput) SetCluster(v string) *RegisterContainerInstanceInput {
s.Cluster = &v
@@ -9409,6 +9443,54 @@ func (s *RunTaskOutput) SetTasks(v []*Task) *RunTaskOutput {
return s
}
+type Secret struct {
+ _ struct{} `type:"structure"`
+
+ // Name is a required field
+ Name *string `locationName:"name" type:"string" required:"true"`
+
+ // ValueFrom is a required field
+ ValueFrom *string `locationName:"valueFrom" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Secret) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Secret) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Secret) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Secret"}
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.ValueFrom == nil {
+ invalidParams.Add(request.NewErrParamRequired("ValueFrom"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetName sets the Name field's value.
+func (s *Secret) SetName(v string) *Secret {
+ s.Name = &v
+ return s
+}
+
+// SetValueFrom sets the ValueFrom field's value.
+func (s *Secret) SetValueFrom(v string) *Secret {
+ s.ValueFrom = &v
+ return s
+}
+
// Details on a service within a cluster
type Service struct {
_ struct{} `type:"structure"`
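
The generated `Secret` type follows the SDK's usual builder-plus-validate pattern: both `name` and `valueFrom` are required, so `Validate` reports an error until both setters have been called. A brief usage sketch (the parameter ARN is illustrative):

package main

import (
	"fmt"

	"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
)

func main() {
	// Name alone is not enough; validation fails until ValueFrom is set too.
	s := (&ecs.Secret{}).SetName("DB_PASSWORD")
	fmt.Println(s.Validate()) // reports that ValueFrom is missing

	s.SetValueFrom("arn:aws:ssm:us-west-2:123456789012:parameter/db-password")
	fmt.Println(s.Validate()) // <nil>
}
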
diff --git a/agent/engine/docker_image_manager.go b/agent/engine/docker_image_manager.go
index 84917824770..b8877d3cdaa 100644
--- a/agent/engine/docker_image_manager.go
+++ b/agent/engine/docker_image_manager.go
@@ -59,6 +59,7 @@ type dockerImageManager struct {
numImagesToDelete int
imageCleanupTimeInterval time.Duration
imagePullBehavior config.ImagePullBehaviorType
+ imageCleanupExclusionList []string
}
// ImageStatesForDeletion is used for implementing the sort interface
@@ -67,12 +68,13 @@ type ImageStatesForDeletion []*image.ImageState
// NewImageManager returns a new ImageManager
func NewImageManager(cfg *config.Config, client dockerapi.DockerClient, state dockerstate.TaskEngineState) ImageManager {
return &dockerImageManager{
- client: client,
- state: state,
- minimumAgeBeforeDeletion: cfg.MinimumImageDeletionAge,
- numImagesToDelete: cfg.NumImagesToDeletePerCycle,
- imageCleanupTimeInterval: cfg.ImageCleanupInterval,
- imagePullBehavior: cfg.ImagePullBehavior,
+ client: client,
+ state: state,
+ minimumAgeBeforeDeletion: cfg.MinimumImageDeletionAge,
+ numImagesToDelete: cfg.NumImagesToDeletePerCycle,
+ imageCleanupTimeInterval: cfg.ImageCleanupInterval,
+ imagePullBehavior: cfg.ImagePullBehavior,
+ imageCleanupExclusionList: cfg.ImageCleanupExclusionList,
}
}
@@ -301,11 +303,7 @@ func (imageManager *dockerImageManager) removeUnusedImages(ctx context.Context)
imageManager.updateLock.Lock()
defer imageManager.updateLock.Unlock()
- imageManager.imageStatesConsideredForDeletion = make(map[string]*image.ImageState)
- seelog.Info("Begin building map of eligible unused images for deletion")
- for _, imageState := range imageManager.getAllImageStates() {
- imageManager.imageStatesConsideredForDeletion[imageState.Image.ImageID] = imageState
- }
+ imageManager.imageStatesConsideredForDeletion = imageManager.imagesConsiderForDeletion(imageManager.getAllImageStates())
for i := 0; i < imageManager.numImagesToDelete; i++ {
err := imageManager.removeLeastRecentlyUsedImage(ctx)
if err != nil {
@@ -315,6 +313,34 @@ func (imageManager *dockerImageManager) removeUnusedImages(ctx context.Context)
}
}
+func (imageManager *dockerImageManager) imagesConsiderForDeletion(allImageStates []*image.ImageState) map[string]*image.ImageState {
+ var imagesConsiderForDeletionMap = make(map[string]*image.ImageState)
+ seelog.Info("Begin building map of eligible unused images for deletion")
+ for _, imageState := range allImageStates {
+ if imageManager.isExcludedFromCleanup(imageState) {
+			// imageState is in the exclusion list; keep it
+ seelog.Infof("Image excluded from deletion: [%s]", imageState.String())
+ } else {
+ imagesConsiderForDeletionMap[imageState.Image.ImageID] = imageState
+ }
+ }
+ return imagesConsiderForDeletionMap
+}
+
+func (imageManager *dockerImageManager) isExcludedFromCleanup(imageState *image.ImageState) bool {
+ var encountered = make(map[string]bool)
+ for _, imageNotDelete := range imageManager.imageCleanupExclusionList {
+ encountered[imageNotDelete] = true
+ }
+
+ for _, name := range imageState.Image.Names {
+ if encountered[name] {
+ return true
+ }
+ }
+ return false
+}
+
func (imageManager *dockerImageManager) removeLeastRecentlyUsedImage(ctx context.Context) error {
leastRecentlyUsedImage := imageManager.getUnusedImageForDeletion()
if leastRecentlyUsedImage == nil {
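
`isExcludedFromCleanup` turns the configured exclusion list into a set and keeps any image state whose names intersect it; only the remaining states become candidates for LRU deletion. A minimal standalone sketch of that membership check (the pause-image entry is a hypothetical exclusion):

package main

import "fmt"

// isExcluded builds a set from the configured list, then tests every
// name the image is known by, mirroring the logic above.
func isExcluded(exclusionList, imageNames []string) bool {
	excluded := make(map[string]bool, len(exclusionList))
	for _, name := range exclusionList {
		excluded[name] = true
	}
	for _, name := range imageNames {
		if excluded[name] {
			return true
		}
	}
	return false
}

func main() {
	exclusion := []string{"amazon/amazon-ecs-pause:latest"} // hypothetical entry
	fmt.Println(isExcluded(exclusion, []string{"amazon/amazon-ecs-pause:latest"})) // true, kept
	fmt.Println(isExcluded(exclusion, []string{"busybox:latest"}))                 // false, deletable
}
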
diff --git a/agent/engine/docker_image_manager_test.go b/agent/engine/docker_image_manager_test.go
index c17e325a869..b0170ea3bb8 100644
--- a/agent/engine/docker_image_manager_test.go
+++ b/agent/engine/docker_image_manager_test.go
@@ -140,8 +140,8 @@ func TestRecordContainerReferenceInspectError(t *testing.T) {
client := mock_dockerapi.NewMockDockerClient(ctrl)
imageManager := &dockerImageManager{
- client: client,
- state: dockerstate.NewTaskEngineState(),
+ client: client,
+ state: dockerstate.NewTaskEngineState(),
minimumAgeBeforeDeletion: config.DefaultImageDeletionAge,
numImagesToDelete: config.DefaultNumImagesToDeletePerCycle,
imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
@@ -174,8 +174,8 @@ func TestRecordContainerReferenceWithNoImageName(t *testing.T) {
client := mock_dockerapi.NewMockDockerClient(ctrl)
imageManager := &dockerImageManager{
- client: client,
- state: dockerstate.NewTaskEngineState(),
+ client: client,
+ state: dockerstate.NewTaskEngineState(),
minimumAgeBeforeDeletion: config.DefaultImageDeletionAge,
numImagesToDelete: config.DefaultNumImagesToDeletePerCycle,
imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
@@ -401,8 +401,8 @@ func TestRemoveContainerReferenceFromImageStateWithNoReference(t *testing.T) {
client := mock_dockerapi.NewMockDockerClient(ctrl)
imageManager := &dockerImageManager{
- client: client,
- state: dockerstate.NewTaskEngineState(),
+ client: client,
+ state: dockerstate.NewTaskEngineState(),
minimumAgeBeforeDeletion: config.DefaultImageDeletionAge,
numImagesToDelete: config.DefaultNumImagesToDeletePerCycle,
imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
@@ -437,8 +437,8 @@ func TestGetCandidateImagesForDeletionImageNoImageState(t *testing.T) {
client := mock_dockerapi.NewMockDockerClient(ctrl)
imageManager := &dockerImageManager{
- client: client,
- state: dockerstate.NewTaskEngineState(),
+ client: client,
+ state: dockerstate.NewTaskEngineState(),
minimumAgeBeforeDeletion: config.DefaultImageDeletionAge,
numImagesToDelete: config.DefaultNumImagesToDeletePerCycle,
imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
@@ -457,8 +457,8 @@ func TestGetCandidateImagesForDeletionImageJustPulled(t *testing.T) {
client := mock_dockerapi.NewMockDockerClient(ctrl)
imageManager := &dockerImageManager{
- client: client,
- state: dockerstate.NewTaskEngineState(),
+ client: client,
+ state: dockerstate.NewTaskEngineState(),
minimumAgeBeforeDeletion: config.DefaultImageDeletionAge,
numImagesToDelete: config.DefaultNumImagesToDeletePerCycle,
imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
@@ -482,8 +482,8 @@ func TestGetCandidateImagesForDeletionImageHasContainerReference(t *testing.T) {
client := mock_dockerapi.NewMockDockerClient(ctrl)
imageManager := &dockerImageManager{
- client: client,
- state: dockerstate.NewTaskEngineState(),
+ client: client,
+ state: dockerstate.NewTaskEngineState(),
minimumAgeBeforeDeletion: config.DefaultImageDeletionAge,
numImagesToDelete: config.DefaultNumImagesToDeletePerCycle,
imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
@@ -523,8 +523,8 @@ func TestGetCandidateImagesForDeletionImageHasMoreContainerReferences(t *testing
client := mock_dockerapi.NewMockDockerClient(ctrl)
imageManager := &dockerImageManager{
- client: client,
- state: dockerstate.NewTaskEngineState(),
+ client: client,
+ state: dockerstate.NewTaskEngineState(),
minimumAgeBeforeDeletion: config.DefaultImageDeletionAge,
numImagesToDelete: config.DefaultNumImagesToDeletePerCycle,
imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
@@ -572,6 +572,110 @@ func TestGetCandidateImagesForDeletionImageHasMoreContainerReferences(t *testing
}
}
+func TestImageCleanupExclusionListWithSingleName(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ client := mock_dockerapi.NewMockDockerClient(ctrl)
+ sourceImageA := &image.Image{
+ ImageID: "sha256:qwerty1",
+ Names: []string{"a"},
+ }
+ sourceImageB := &image.Image{
+ ImageID: "sha256:qwerty2",
+ Names: []string{"b"},
+ }
+ sourceImageC := &image.Image{
+ ImageID: "sha256:qwerty3",
+ Names: []string{"c"},
+ }
+ ImageStateA := &image.ImageState{
+ Image: sourceImageA,
+ PulledAt: time.Now().AddDate(0, -2, 0),
+ }
+ ImageStateB := &image.ImageState{
+ Image: sourceImageB,
+ PulledAt: time.Now().AddDate(0, -2, 0),
+ }
+ ImageStateC := &image.ImageState{
+ Image: sourceImageC,
+ PulledAt: time.Now().AddDate(0, -2, 0),
+ }
+ imageManager := &dockerImageManager{
+ client: client,
+ state: dockerstate.NewTaskEngineState(),
+ minimumAgeBeforeDeletion: config.DefaultImageDeletionAge,
+ numImagesToDelete: config.DefaultNumImagesToDeletePerCycle,
+ imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
+ imageCleanupExclusionList: []string{"a", "c"},
+ imageStatesConsideredForDeletion: map[string]*image.ImageState{
+ "sha256:qwerty2": ImageStateB,
+ },
+ }
+ var testImageStates = []*image.ImageState{ImageStateA, ImageStateB, ImageStateC}
+ var testResult = imageManager.imagesConsiderForDeletion(testImageStates)
+
+ assert.Equal(t, 1, len(testResult), "Expected 1 image state to be returned for deletion")
+ if !reflect.DeepEqual(imageManager.imageStatesConsideredForDeletion, testResult) {
+ t.Error("Incorrect image return from getCandidateImagesForDeletionHelper function")
+ }
+}
+
+func TestImageCleanupExclusionListWithMultipleNames(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ client := mock_dockerapi.NewMockDockerClient(ctrl)
+ sourceImageA := &image.Image{
+ ImageID: "sha256:qwerty1",
+ Names: []string{"a", "b", "c"},
+ }
+ sourceImageB := &image.Image{
+ ImageID: "sha256:qwerty2",
+ Names: []string{"d", "e", "f"},
+ }
+ sourceImageC := &image.Image{
+ ImageID: "sha256:qwerty3",
+ Names: []string{"g", "h", "i"},
+ }
+ sourceImageD := &image.Image{
+ ImageID: "sha256:qwerty4",
+ Names: []string{"x", "y", "z"},
+ }
+ ImageStateA := &image.ImageState{
+ Image: sourceImageA,
+ PulledAt: time.Now().AddDate(0, -2, 0),
+ }
+ ImageStateB := &image.ImageState{
+ Image: sourceImageB,
+ PulledAt: time.Now().AddDate(0, -2, 0),
+ }
+ ImageStateC := &image.ImageState{
+ Image: sourceImageC,
+ PulledAt: time.Now().AddDate(0, -2, 0),
+ }
+ ImageStateD := &image.ImageState{
+ Image: sourceImageD,
+ PulledAt: time.Now().AddDate(0, -2, 0),
+ }
+ imageManager := &dockerImageManager{
+ client: client,
+ state: dockerstate.NewTaskEngineState(),
+ minimumAgeBeforeDeletion: config.DefaultImageDeletionAge,
+ numImagesToDelete: config.DefaultNumImagesToDeletePerCycle,
+ imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
+ imageCleanupExclusionList: []string{"a", "d", "g"},
+ imageStatesConsideredForDeletion: map[string]*image.ImageState{
+ "sha256:qwerty4": ImageStateD,
+ },
+ }
+ var testImageStates = []*image.ImageState{ImageStateA, ImageStateB, ImageStateC, ImageStateD}
+ var testResult = imageManager.imagesConsiderForDeletion(testImageStates)
+
+ assert.Equal(t, 1, len(testResult), "Expected 1 image state to be returned for deletion")
+ if !reflect.DeepEqual(imageManager.imageStatesConsideredForDeletion, testResult) {
+ t.Error("Incorrect image return from getCandidateImagesForDeletionHelper function")
+ }
+}
+
func TestGetLeastRecentlyUsedImages(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -616,8 +720,8 @@ func TestGetLeastRecentlyUsedImagesLessThanFive(t *testing.T) {
client := mock_dockerapi.NewMockDockerClient(ctrl)
imageManager := &dockerImageManager{
- client: client,
- state: dockerstate.NewTaskEngineState(),
+ client: client,
+ state: dockerstate.NewTaskEngineState(),
minimumAgeBeforeDeletion: config.DefaultImageDeletionAge,
numImagesToDelete: config.DefaultNumImagesToDeletePerCycle,
imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
@@ -650,8 +754,8 @@ func TestRemoveAlreadyExistingImageNameWithDifferentID(t *testing.T) {
client := mock_dockerapi.NewMockDockerClient(ctrl)
imageManager := &dockerImageManager{
- client: client,
- state: dockerstate.NewTaskEngineState(),
+ client: client,
+ state: dockerstate.NewTaskEngineState(),
minimumAgeBeforeDeletion: config.DefaultImageDeletionAge,
numImagesToDelete: config.DefaultNumImagesToDeletePerCycle,
imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
@@ -701,8 +805,8 @@ func TestImageCleanupHappyPath(t *testing.T) {
client := mock_dockerapi.NewMockDockerClient(ctrl)
imageManager := &dockerImageManager{
- client: client,
- state: dockerstate.NewTaskEngineState(),
+ client: client,
+ state: dockerstate.NewTaskEngineState(),
minimumAgeBeforeDeletion: 1 * time.Millisecond,
numImagesToDelete: config.DefaultNumImagesToDeletePerCycle,
imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
@@ -753,8 +857,8 @@ func TestImageCleanupCannotRemoveImage(t *testing.T) {
client := mock_dockerapi.NewMockDockerClient(ctrl)
imageManager := &dockerImageManager{
- client: client,
- state: dockerstate.NewTaskEngineState(),
+ client: client,
+ state: dockerstate.NewTaskEngineState(),
minimumAgeBeforeDeletion: config.DefaultImageDeletionAge,
numImagesToDelete: config.DefaultNumImagesToDeletePerCycle,
imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
@@ -806,8 +910,8 @@ func TestImageCleanupRemoveImageById(t *testing.T) {
client := mock_dockerapi.NewMockDockerClient(ctrl)
imageManager := &dockerImageManager{
- client: client,
- state: dockerstate.NewTaskEngineState(),
+ client: client,
+ state: dockerstate.NewTaskEngineState(),
minimumAgeBeforeDeletion: config.DefaultImageDeletionAge,
numImagesToDelete: config.DefaultNumImagesToDeletePerCycle,
imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
@@ -1037,8 +1141,8 @@ func TestConcurrentRemoveUnusedImages(t *testing.T) {
client := mock_dockerapi.NewMockDockerClient(ctrl)
imageManager := &dockerImageManager{
- client: client,
- state: dockerstate.NewTaskEngineState(),
+ client: client,
+ state: dockerstate.NewTaskEngineState(),
minimumAgeBeforeDeletion: config.DefaultImageDeletionAge,
numImagesToDelete: config.DefaultNumImagesToDeletePerCycle,
imageCleanupTimeInterval: config.DefaultImageCleanupTimeInterval,
diff --git a/agent/engine/docker_task_engine.go b/agent/engine/docker_task_engine.go
index ed3987118a1..ea7bd624ebe 100644
--- a/agent/engine/docker_task_engine.go
+++ b/agent/engine/docker_task_engine.go
@@ -36,6 +36,7 @@ import (
"github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerstate"
"github.com/aws/amazon-ecs-agent/agent/eventstream"
+ "github.com/aws/amazon-ecs-agent/agent/metrics"
"github.com/aws/amazon-ecs-agent/agent/statechange"
"github.com/aws/amazon-ecs-agent/agent/statemanager"
"github.com/aws/amazon-ecs-agent/agent/taskresource"
@@ -415,6 +416,7 @@ func (engine *DockerTaskEngine) synchronizeContainerStatus(container *apicontain
// checkTaskState inspects the state of all containers within a task and writes
// their state to the managed task's container channel.
func (engine *DockerTaskEngine) checkTaskState(task *apitask.Task) {
+ defer metrics.MetricsEngineGlobal.RecordTaskEngineMetric("CHECK_TASK_STATE")()
taskContainers, ok := engine.state.ContainerMapByArn(task.Arn)
if !ok {
seelog.Warnf("Task engine [%s]: could not check task state; no task in state", task.Arn)
@@ -614,6 +616,7 @@ func (engine *DockerTaskEngine) StateChangeEvents() chan statechange.Event {
// AddTask starts tracking a task
func (engine *DockerTaskEngine) AddTask(task *apitask.Task) {
+ defer metrics.MetricsEngineGlobal.RecordTaskEngineMetric("ADD_TASK")()
err := task.PostUnmarshalTask(engine.cfg, engine.credentialsManager,
engine.resourceFields, engine.client, engine.ctx)
if err != nil {
@@ -857,8 +860,8 @@ func (engine *DockerTaskEngine) createContainer(task *apitask.Task, container *a
}
// apply secrets to container.Environment
- if container.ShouldCreateWithSSMSecret() {
- err := task.PopulateSSMSecrets(container)
+ if container.HasSecretAsEnv() {
+ err := task.PopulateSecretsAsEnv(container)
if err != nil {
return dockerapi.DockerContainerMetadata{Error: apierrors.NamedError(err)}
}
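
The rename from `ShouldCreateWithSSMSecret`/`PopulateSSMSecrets` to `HasSecretAsEnv`/`PopulateSecretsAsEnv` reflects that environment-variable injection is now provider-agnostic: the decision keys off the secret's `Type`, not whether it came from SSM or Secrets Manager. A hedged sketch of that check, using a simplified stand-in for the container secret type:

package main

import "fmt"

// secret mirrors the fields exercised by the tests below; the check is
// provider-agnostic and keys off Type alone.
type secret struct {
	Name, ValueFrom, Region, Type, Provider string
}

func hasSecretAsEnv(secrets []secret) bool {
	for _, s := range secrets {
		if s.Type == "ENVIRONMENT_VARIABLE" {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasSecretAsEnv([]secret{
		{Name: "mySSMSecret", Provider: "ssm", Type: "MOUNT_POINT"},
		{Name: "myASMSecret", Provider: "asm", Type: "ENVIRONMENT_VARIABLE"},
	})) // true: the asm secret is injected into the environment
}
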
diff --git a/agent/engine/docker_task_engine_test.go b/agent/engine/docker_task_engine_test.go
index db8006100f8..d9b60994233 100644
--- a/agent/engine/docker_task_engine_test.go
+++ b/agent/engine/docker_task_engine_test.go
@@ -55,6 +55,7 @@ import (
"github.com/aws/amazon-ecs-agent/agent/statemanager/mocks"
"github.com/aws/amazon-ecs-agent/agent/taskresource"
"github.com/aws/amazon-ecs-agent/agent/taskresource/asmauth"
+ "github.com/aws/amazon-ecs-agent/agent/taskresource/asmsecret"
"github.com/aws/amazon-ecs-agent/agent/taskresource/mocks"
"github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret"
taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume"
@@ -729,7 +730,7 @@ func TestCreateContainerMergesLabels(t *testing.T) {
"com.amazonaws.ecs.task-definition-family": "myFamily",
"com.amazonaws.ecs.task-definition-version": "1",
"com.amazonaws.ecs.cluster": "",
- "key": "value",
+ "key": "value",
}
client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes()
client.EXPECT().CreateContainer(gomock.Any(), expectedConfig, gomock.Any(), gomock.Any(), gomock.Any())
@@ -2289,126 +2290,233 @@ func TestSynchronizeResource(t *testing.T) {
dockerTaskEngine.synchronizeState()
}
-func TestTaskSSMSecretsEnvironmentVariables(t *testing.T) {
- ctx, cancel := context.WithCancel(context.TODO())
- defer cancel()
- ctrl, client, mockTime, taskEngine, credentialsManager, _, _ := mocks(t, ctx, &defaultConfig)
- defer ctrl.Finish()
-
+func TestTaskSecretsEnvironmentVariables(t *testing.T) {
// metadata required for createContainer workflow validation
- ssmTaskARN := "ssmSecretsTask"
- ssmTaskFamily := "ssmSecretsTaskFamily"
- ssmTaskVersion := "1"
- ssmTaskContainerName := "ssmSecretsContainer"
+ taskARN := "secretsTask"
+ taskFamily := "secretsTaskFamily"
+ taskVersion := "1"
+ taskContainerName := "secretsContainer"
// metadata required for ssm secret resource validation
- secretName := "mySecret"
- secretValueFrom := "ssm/mySecret"
- secretRetrievedValue := "mySecretValue"
- secretRegion := "us-west-2"
+ ssmSecretName := "mySSMSecret"
+ ssmSecretValueFrom := "ssm/mySecret"
+ ssmSecretRetrievedValue := "mySSMSecretValue"
+ ssmSecretRegion := "us-west-2"
+
+ // metadata required for asm secret resource validation
+ asmSecretName := "myASMSecret"
+ asmSecretValueFrom := "asm/mySecret"
+ asmSecretRetrievedValue := "myASMSecretValue"
+ asmSecretRegion := "us-west-2"
+ asmSecretKey := asmSecretValueFrom + "_" + asmSecretRegion
- expectedEnvVar := secretName + "=" + secretRetrievedValue
+ ssmExpectedEnvVar := ssmSecretName + "=" + ssmSecretRetrievedValue
+ asmExpectedEnvVar := asmSecretName + "=" + asmSecretRetrievedValue
- secrets := []apicontainer.Secret{
+ testCases := []struct {
+ name string
+ secrets []apicontainer.Secret
+ ssmSecret apicontainer.Secret
+ asmSecret apicontainer.Secret
+ expectedEnv []string
+ }{
{
- Name: secretName,
- ValueFrom: secretValueFrom,
- Region: secretRegion,
- Type: "ENVIRONMENT_VARIABLES",
- Provider: "ssm",
+ name: "ASMSecretAsEnv",
+ secrets: []apicontainer.Secret{
+ {
+ Name: ssmSecretName,
+ ValueFrom: ssmSecretValueFrom,
+ Region: ssmSecretRegion,
+ Type: "MOUNT_POINT",
+ Provider: "ssm",
+ },
+ {
+ Name: asmSecretName,
+ ValueFrom: asmSecretValueFrom,
+ Region: asmSecretRegion,
+ Type: "ENVIRONMENT_VARIABLE",
+ Provider: "asm",
+ },
+ },
+ ssmSecret: apicontainer.Secret{
+ Name: ssmSecretName,
+ ValueFrom: ssmSecretValueFrom,
+ Region: ssmSecretRegion,
+ Type: "MOUNT_POINT",
+ Provider: "ssm",
+ },
+ asmSecret: apicontainer.Secret{
+ Name: asmSecretName,
+ ValueFrom: asmSecretValueFrom,
+ Region: asmSecretRegion,
+ Type: "ENVIRONMENT_VARIABLE",
+ Provider: "asm",
+ },
+ expectedEnv: []string{asmExpectedEnvVar},
},
- }
-
- // sample test
- testTask := &apitask.Task{
- Arn: ssmTaskARN,
- Family: ssmTaskFamily,
- Version: ssmTaskVersion,
- Containers: []*apicontainer.Container{
- {
- Name: ssmTaskContainerName,
- Secrets: secrets,
+ {
+ name: "SSMSecretAsEnv",
+ secrets: []apicontainer.Secret{
+ {
+ Name: ssmSecretName,
+ ValueFrom: ssmSecretValueFrom,
+ Region: ssmSecretRegion,
+ Type: "ENVIRONMENT_VARIABLE",
+ Provider: "ssm",
+ },
+ {
+ Name: asmSecretName,
+ ValueFrom: asmSecretValueFrom,
+ Region: asmSecretRegion,
+ Type: "MOUNT_POINT",
+ Provider: "asm",
+ },
+ },
+ ssmSecret: apicontainer.Secret{
+ Name: ssmSecretName,
+ ValueFrom: ssmSecretValueFrom,
+ Region: ssmSecretRegion,
+ Type: "ENVIRONMENT_VARIABLE",
+ Provider: "ssm",
},
+ asmSecret: apicontainer.Secret{
+ Name: asmSecretName,
+ ValueFrom: asmSecretValueFrom,
+ Region: asmSecretRegion,
+ Type: "MOUNT_POINT",
+ Provider: "asm",
+ },
+ expectedEnv: []string{ssmExpectedEnvVar},
},
}
- // metadata required for execution role authentication workflow
- credentialsID := "execution role"
- executionRoleCredentials := credentials.IAMRoleCredentials{
- CredentialsID: credentialsID,
- }
- taskIAMcreds := credentials.TaskIAMRoleCredentials{
- IAMRoleCredentials: executionRoleCredentials,
- }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
- // configure the task and container to use execution role
- testTask.SetExecutionRoleCredentialsID(credentialsID)
+ ctx, cancel := context.WithCancel(context.TODO())
+ defer cancel()
+ ctrl, client, mockTime, taskEngine, credentialsManager, _, _ := mocks(t, ctx, &defaultConfig)
+ defer ctrl.Finish()
- // validate base config
- expectedConfig, err := testTask.DockerConfig(testTask.Containers[0], defaultDockerClientAPIVersion)
- if err != nil {
- t.Fatal(err)
- }
+ // sample test
+ testTask := &apitask.Task{
+ Arn: taskARN,
+ Family: taskFamily,
+ Version: taskVersion,
+ Containers: []*apicontainer.Container{
+ {
+ Name: taskContainerName,
+ Secrets: tc.secrets,
+ },
+ },
+ }
- expectedConfig.Labels = map[string]string{
- "com.amazonaws.ecs.task-arn": ssmTaskARN,
- "com.amazonaws.ecs.container-name": ssmTaskContainerName,
- "com.amazonaws.ecs.task-definition-family": ssmTaskFamily,
- "com.amazonaws.ecs.task-definition-version": ssmTaskVersion,
- "com.amazonaws.ecs.cluster": "",
- }
+ // metadata required for execution role authentication workflow
+ credentialsID := "execution role"
+ executionRoleCredentials := credentials.IAMRoleCredentials{
+ CredentialsID: credentialsID,
+ }
+ taskIAMcreds := credentials.TaskIAMRoleCredentials{
+ IAMRoleCredentials: executionRoleCredentials,
+ }
- // required to validate container config includes secrets as environment variables
- expectedConfig.Env = []string{expectedEnvVar}
+ // configure the task and container to use execution role
+ testTask.SetExecutionRoleCredentialsID(credentialsID)
- // required for validating ssm workflows
- ssmClientCreator := mock_ssm_factory.NewMockSSMClientCreator(ctrl)
- mockSSMClient := mock_ssmiface.NewMockSSMClient(ctrl)
+ // validate base config
+ expectedConfig, err := testTask.DockerConfig(testTask.Containers[0], defaultDockerClientAPIVersion)
+ if err != nil {
+ t.Fatal(err)
+ }
- ssmRequirements := map[string][]apicontainer.Secret{
- secretRegion: secrets,
- }
+ expectedConfig.Labels = map[string]string{
+ "com.amazonaws.ecs.task-arn": taskARN,
+ "com.amazonaws.ecs.container-name": taskContainerName,
+ "com.amazonaws.ecs.task-definition-family": taskFamily,
+ "com.amazonaws.ecs.task-definition-version": taskVersion,
+ "com.amazonaws.ecs.cluster": "",
+ }
- ssmSecretRes := ssmsecret.NewSSMSecretResource(
- testTask.Arn,
- ssmRequirements,
- credentialsID,
- credentialsManager,
- ssmClientCreator)
+ // required to validate container config includes secrets as environment variables
+ expectedConfig.Env = tc.expectedEnv
- testTask.ResourcesMapUnsafe = map[string][]taskresource.TaskResource{
- ssmsecret.ResourceName: {ssmSecretRes},
- }
+ // required for validating ssm workflows
+ ssmClientCreator := mock_ssm_factory.NewMockSSMClientCreator(ctrl)
+ mockSSMClient := mock_ssmiface.NewMockSSMClient(ctrl)
- ssmClientOutput := &ssm.GetParametersOutput{
- InvalidParameters: []*string{},
- Parameters: []*ssm.Parameter{
- &ssm.Parameter{
- Name: aws.String(secretValueFrom),
- Value: aws.String(secretRetrievedValue),
- },
- },
- }
+ ssmRequirements := map[string][]apicontainer.Secret{
+ ssmSecretRegion: []apicontainer.Secret{
+ tc.ssmSecret,
+ },
+ }
- reqSecretNames := []*string{aws.String(secretValueFrom)}
+ ssmSecretRes := ssmsecret.NewSSMSecretResource(
+ testTask.Arn,
+ ssmRequirements,
+ credentialsID,
+ credentialsManager,
+ ssmClientCreator)
- credentialsManager.EXPECT().GetTaskCredentials(credentialsID).Return(taskIAMcreds, true)
- ssmClientCreator.EXPECT().NewSSMClient(region, executionRoleCredentials).Return(mockSSMClient)
+ // required for validating asm workflows
+ asmClientCreator := mock_asm_factory.NewMockClientCreator(ctrl)
+ mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl)
- mockSSMClient.EXPECT().GetParameters(gomock.Any()).Do(func(in *ssm.GetParametersInput) {
- assert.Equal(t, in.Names, reqSecretNames)
- }).Return(ssmClientOutput, nil).Times(1)
+ asmRequirements := map[string]apicontainer.Secret{
+ asmSecretKey: tc.asmSecret,
+ }
- require.NoError(t, ssmSecretRes.Create())
+ asmSecretRes := asmsecret.NewASMSecretResource(
+ testTask.Arn,
+ asmRequirements,
+ credentialsID,
+ credentialsManager,
+ asmClientCreator)
- mockTime.EXPECT().Now().AnyTimes()
- client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes()
+ testTask.ResourcesMapUnsafe = map[string][]taskresource.TaskResource{
+ ssmsecret.ResourceName: {ssmSecretRes},
+ asmsecret.ResourceName: {asmSecretRes},
+ }
- // test validates that the expectedConfig includes secrets are appended as
- // environment varibles
- client.EXPECT().CreateContainer(gomock.Any(), expectedConfig, gomock.Any(), gomock.Any(), gomock.Any())
+ ssmClientOutput := &ssm.GetParametersOutput{
+ InvalidParameters: []*string{},
+ Parameters: []*ssm.Parameter{
+ &ssm.Parameter{
+ Name: aws.String(ssmSecretValueFrom),
+ Value: aws.String(ssmSecretRetrievedValue),
+ },
+ },
+ }
+
+ asmClientOutput := &secretsmanager.GetSecretValueOutput{
+ SecretString: aws.String(asmSecretRetrievedValue),
+ }
- ret := taskEngine.(*DockerTaskEngine).createContainer(testTask, testTask.Containers[0])
+ reqSecretNames := []*string{aws.String(ssmSecretValueFrom)}
- assert.Nil(t, ret.Error)
+ credentialsManager.EXPECT().GetTaskCredentials(credentialsID).Return(taskIAMcreds, true).Times(2)
+ ssmClientCreator.EXPECT().NewSSMClient(region, executionRoleCredentials).Return(mockSSMClient)
+ asmClientCreator.EXPECT().NewASMClient(region, executionRoleCredentials).Return(mockASMClient)
+
+ mockSSMClient.EXPECT().GetParameters(gomock.Any()).Do(func(in *ssm.GetParametersInput) {
+ assert.Equal(t, in.Names, reqSecretNames)
+ }).Return(ssmClientOutput, nil).Times(1)
+
+ mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Do(func(in *secretsmanager.GetSecretValueInput) {
+ assert.Equal(t, aws.StringValue(in.SecretId), asmSecretValueFrom)
+ }).Return(asmClientOutput, nil).Times(1)
+
+ require.NoError(t, ssmSecretRes.Create())
+ require.NoError(t, asmSecretRes.Create())
+
+ mockTime.EXPECT().Now().AnyTimes()
+ client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil).AnyTimes()
+
+			// test validates that the secrets are appended to expectedConfig
+			// as environment variables
+ client.EXPECT().CreateContainer(gomock.Any(), expectedConfig, gomock.Any(), gomock.Any(), gomock.Any())
+ ret := taskEngine.(*DockerTaskEngine).createContainer(testTask, testTask.Containers[0])
+ assert.Nil(t, ret.Error)
+
+ })
+ }
}
diff --git a/agent/engine/engine_unix_integ_test.go b/agent/engine/engine_unix_integ_test.go
index e6bd9308891..e98e37642e2 100644
--- a/agent/engine/engine_unix_integ_test.go
+++ b/agent/engine/engine_unix_integ_test.go
@@ -115,29 +115,29 @@ func createNamespaceSharingTask(arn, pidMode, ipcMode, testImage string, theComm
DesiredStatusUnsafe: apitaskstatus.TaskRunning,
Containers: []*apicontainer.Container{
&apicontainer.Container{
- Name: "container0",
- Image: testImage,
- DesiredStatusUnsafe: apicontainerstatus.ContainerRunning,
- CPU: 100,
- Memory: 80,
+ Name: "container0",
+ Image: testImage,
+ DesiredStatusUnsafe: apicontainerstatus.ContainerRunning,
+ CPU: 100,
+ Memory: 80,
TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
},
&apicontainer.Container{
- Name: "container1",
- Image: testBusyboxImage,
- Command: theCommand,
- DesiredStatusUnsafe: apicontainerstatus.ContainerRunning,
- CPU: 100,
- Memory: 80,
+ Name: "container1",
+ Image: testBusyboxImage,
+ Command: theCommand,
+ DesiredStatusUnsafe: apicontainerstatus.ContainerRunning,
+ CPU: 100,
+ Memory: 80,
TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
},
&apicontainer.Container{
- Name: "container2",
- Image: testBusyboxImage,
- Command: theCommand,
- DesiredStatusUnsafe: apicontainerstatus.ContainerRunning,
- CPU: 100,
- Memory: 80,
+ Name: "container2",
+ Image: testBusyboxImage,
+ Command: theCommand,
+ DesiredStatusUnsafe: apicontainerstatus.ContainerRunning,
+ CPU: 100,
+ Memory: 80,
TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet),
},
},
diff --git a/agent/eventhandler/task_handler.go b/agent/eventhandler/task_handler.go
index c74760b5d6b..9db748d3355 100644
--- a/agent/eventhandler/task_handler.go
+++ b/agent/eventhandler/task_handler.go
@@ -25,6 +25,7 @@ import (
apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerstate"
+ "github.com/aws/amazon-ecs-agent/agent/metrics"
"github.com/aws/amazon-ecs-agent/agent/statechange"
"github.com/aws/amazon-ecs-agent/agent/statemanager"
"github.com/aws/amazon-ecs-agent/agent/utils"
@@ -267,6 +268,7 @@ func (handler *TaskHandler) getTaskEventsUnsafe(event *sendableEvent) *taskSenda
// Continuously retries sending an event until it succeeds, sleeping between each
// attempt
func (handler *TaskHandler) submitTaskEvents(taskEvents *taskSendableEvents, client api.ECSClient, taskARN string) {
+ defer metrics.MetricsEngineGlobal.RecordECSClientMetric("SUBMIT_TASK_EVENTS")()
defer handler.removeTaskEvents(taskARN)
backoff := utils.NewSimpleBackoff(submitStateBackoffMin, submitStateBackoffMax,
diff --git a/agent/functional_tests/generators/simpletests.go b/agent/functional_tests/generators/simpletests.go
index 7a26873dfca..4b70cd1c6df 100644
--- a/agent/functional_tests/generators/simpletests.go
+++ b/agent/functional_tests/generators/simpletests.go
@@ -30,8 +30,7 @@ import (
)
// TODO: add more awsvpc functional tests using simple test template
-var simpleTestPattern = `
-// +build functional,%s
+var simpleTestPattern = `// +build functional,%s
// Copyright 2014-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
diff --git a/agent/functional_tests/testdata/taskdefinitions/ssmsecrets-environment-variables/task-definition.json b/agent/functional_tests/testdata/taskdefinitions/ssmsecrets-environment-variables/task-definition.json
new file mode 100644
index 00000000000..4cb10b3262d
--- /dev/null
+++ b/agent/functional_tests/testdata/taskdefinitions/ssmsecrets-environment-variables/task-definition.json
@@ -0,0 +1,20 @@
+{
+ "family": "ssm-integration",
+ "executionRoleArn": "$$$EXECUTION_ROLE$$$",
+ "containerDefinitions": [
+ {
+ "name": "ssmsecrets-environment-variables",
+ "image": "busybox:latest",
+ "cpu": 100,
+ "memory": 100,
+ "command": ["sh", "-c", "if echo $SECRET_NAME | grep \"secretValue\"; then exit 42; else exit 1; fi"
+ ],
+ "secrets": [
+ {
+ "name": "$$$SECRET_NAME$$$",
+ "valueFrom": "$$$SSM_PARAMETER_NAME$$$"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/agent/functional_tests/tests/functionaltests_test.go b/agent/functional_tests/tests/functionaltests_test.go
index 90bc8c44d28..247b1404510 100644
--- a/agent/functional_tests/tests/functionaltests_test.go
+++ b/agent/functional_tests/tests/functionaltests_test.go
@@ -644,8 +644,6 @@ func testV3TaskEndpoint(t *testing.T, taskName, containerName, networkMode, awsl
}
func TestContainerInstanceTags(t *testing.T) {
- t.Skip("Skipping TestContainerInstanceTags")
-
// We need long container instance ARN for tagging APIs, PutAccountSettingInput
// will enable long container instance ARN.
putAccountSettingInput := ecsapi.PutAccountSettingInput{
@@ -696,8 +694,7 @@ func TestContainerInstanceTags(t *testing.T) {
}
agent := RunAgent(t, agentOptions)
defer agent.Cleanup()
- // Change the required Agent version to v1.22.0 during 1.22.0 staging or after staging.
- agent.RequireVersion(">=1.21.0")
+ agent.RequireVersion(">=1.22.0")
// Verify the tags are registered.
ListTagsForResourceInput := ecsapi.ListTagsForResourceInput{
diff --git a/agent/functional_tests/tests/functionaltests_unix_test.go b/agent/functional_tests/tests/functionaltests_unix_test.go
index cc67717342b..500b096bbee 100644
--- a/agent/functional_tests/tests/functionaltests_unix_test.go
+++ b/agent/functional_tests/tests/functionaltests_unix_test.go
@@ -22,6 +22,7 @@ import (
"os"
"path/filepath"
"regexp"
+ "runtime"
"strconv"
"strings"
"testing"
@@ -35,12 +36,12 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/aws/aws-sdk-go/service/secretsmanager"
+ "github.com/aws/aws-sdk-go/service/ssm"
"github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
docker "github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -547,6 +548,10 @@ func TestFluentdTag(t *testing.T) {
// tag was added in docker 1.9.0
RequireDockerVersion(t, ">=1.9.0")
+ if runtime.GOOS == "arm64" {
+ t.Skip()
+ }
+
fluentdDriverTest("fluentd-tag", t)
}
@@ -557,11 +562,16 @@ func TestFluentdLogTag(t *testing.T) {
RequireDockerVersion(t, ">=1.8.0")
RequireDockerVersion(t, "<1.12.0")
+ if runtime.GOOS == "arm64" {
+ t.Skip()
+ }
+
fluentdDriverTest("fluentd-log-tag", t)
}
func fluentdDriverTest(taskDefinition string, t *testing.T) {
ctx := context.TODO()
+
agentOptions := AgentOptions{
ExtraEnvironment: map[string]string{
"ECS_AVAILABLE_LOGGING_DRIVERS": `["fluentd"]`,
@@ -1083,3 +1093,164 @@ func TestTaskIPCNamespaceSharing(t *testing.T) {
require.NoError(t, sErr, "Error waiting for ipc-namespace-task-share task to stop")
assert.Equal(t, 2, rExitCode, "Container could see IPC resource, but shouldn't")
}
+
+// TestSSMSecretsNonEncryptedParameterARN tests the workflow for retrieving secrets from SSM Parameter Store,
+// here the secret is a non-encrypted parameter referenced by its full ARN
+func TestSSMSecretsNonEncryptedParameterARN(t *testing.T) {
+
+ if os.Getenv("TEST_DISABLE_EXECUTION_ROLE") == "true" {
+ t.Skip("TEST_DISABLE_EXECUTION_ROLE was set to true")
+ }
+
+ executionRole := os.Getenv("ECS_FTS_EXECUTION_ROLE")
+	// the execution role ARN follows the pattern arn:aws:iam::accountId:role/***
+ executionRoleArr := strings.Split(executionRole, ":")
+ partition := executionRoleArr[1]
+ accountId := executionRoleArr[4]
+
+ parameterName := "FunctionalTest-SSMSecretsString"
+ secretName := "SECRET_NAME"
+ region := *ECS.Config.Region
+ ssmClient := ssm.New(session.New(), aws.NewConfig().WithRegion(region))
+ input := &ssm.PutParameterInput{
+ Description: aws.String("Resource created for the ECS Agent Functional Test: TestSSMSecretsNonEncryptedParameter"),
+ Name: aws.String(parameterName),
+ Value: aws.String("secretValue"),
+ Type: aws.String("String"),
+ }
+
+ // create parameter in parameter store if it does not exist
+ _, err := ssmClient.PutParameter(input)
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ switch aerr.Code() {
+ case ssm.ErrCodeParameterAlreadyExists:
+ t.Logf("Parameter %v already exists in SSM Parameter Store", parameterName)
+ default:
+ require.NoError(t, err, "SSM PutParameter call failed")
+ }
+ }
+ }
+
+ agent := RunAgent(t, nil)
+ defer agent.Cleanup()
+
+ agent.RequireVersion(">=1.22.0")
+
+ tdOverrides := make(map[string]string)
+ tdOverrides["$$$SECRET_NAME$$$"] = secretName
+
+ arn := fmt.Sprintf("arn:%s:ssm:%s:%s:parameter/%s", partition, region, accountId, parameterName)
+ tdOverrides["$$$SSM_PARAMETER_NAME$$$"] = arn
+ tdOverrides["$$$EXECUTION_ROLE$$$"] = executionRole
+
+ task, err := agent.StartTaskWithTaskDefinitionOverrides(t, "ssmsecrets-environment-variables", tdOverrides)
+ require.NoError(t, err, "Failed to start task for ssmsecrets environment variables")
+
+ err = task.WaitStopped(waitTaskStateChangeDuration)
+ require.NoError(t, err)
+ exitCode, _ := task.ContainerExitcode("ssmsecrets-environment-variables")
+ assert.Equal(t, 42, exitCode, fmt.Sprintf("Expected exit code of 42; got %d", exitCode))
+}
+
+// TestSSMSecretsEncryptedParameter tests the workflow for retrieving secrets from SSM Parameter Store,
+// here the secret is an encrypted (SecureString) parameter
+func TestSSMSecretsEncryptedParameter(t *testing.T) {
+
+ if os.Getenv("TEST_DISABLE_EXECUTION_ROLE") == "true" {
+ t.Skip("TEST_DISABLE_EXECUTION_ROLE was set to true")
+ }
+
+ parameterName := "FunctionalTest-SSMSecretsSecureString"
+ secretName := "SECRET_NAME"
+ ssmClient := ssm.New(session.New(), aws.NewConfig().WithRegion(*ECS.Config.Region))
+ input := &ssm.PutParameterInput{
+ Description: aws.String("Resource created for the ECS Agent Functional Test: TestSSMSecretsEncryptedParameter"),
+ Name: aws.String(parameterName),
+ Value: aws.String("secretValue"),
+ Type: aws.String("SecureString"),
+ }
+
+ // create parameter in parameter store if it does not exist
+ _, err := ssmClient.PutParameter(input)
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ switch aerr.Code() {
+ case ssm.ErrCodeParameterAlreadyExists:
+ t.Logf("Parameter %v already exists in SSM Parameter Store", parameterName)
+ default:
+ require.NoError(t, err, "SSM PutParameter call failed")
+ }
+ }
+ }
+
+ agent := RunAgent(t, nil)
+ defer agent.Cleanup()
+
+ agent.RequireVersion(">=1.22.0")
+
+ tdOverrides := make(map[string]string)
+ tdOverrides["$$$SECRET_NAME$$$"] = secretName
+ tdOverrides["$$$SSM_PARAMETER_NAME$$$"] = parameterName
+ tdOverrides["$$$EXECUTION_ROLE$$$"] = os.Getenv("ECS_FTS_EXECUTION_ROLE")
+
+ task, err := agent.StartTaskWithTaskDefinitionOverrides(t, "ssmsecrets-environment-variables", tdOverrides)
+ require.NoError(t, err, "Failed to start task for ssmsecrets environment variables")
+
+ err = task.WaitStopped(waitTaskStateChangeDuration)
+ require.NoError(t, err)
+ exitCode, _ := task.ContainerExitcode("ssmsecrets-environment-variables")
+ assert.Equal(t, 42, exitCode, fmt.Sprintf("Expected exit code of 42; got %d", exitCode))
+}
+
+// TestSSMSecretsEncryptedASMSecrets tests the workflow for retrieving secrets from SSM Parameter Store,
+// here the secret lives in AWS Secrets Manager and is read through the Parameter Store
+// /aws/reference/secretsmanager/ reference path
+func TestSSMSecretsEncryptedASMSecrets(t *testing.T) {
+
+ if os.Getenv("TEST_DISABLE_EXECUTION_ROLE") == "true" {
+ t.Skip("TEST_DISABLE_EXECUTION_ROLE was set to true")
+ }
+
+ parameterName := "/aws/reference/secretsmanager/FunctionalTest-SSMSecretsSecretFromASM"
+ secretName := "SECRET_NAME"
+ asmClient := secretsmanager.New(session.New(), aws.NewConfig().WithRegion(*ECS.Config.Region))
+ input := &secretsmanager.CreateSecretInput{
+ Description: aws.String("Resource created for the ECS Agent Functional Test: TestSSMSecretsEncryptedASMSecrets"),
+ Name: aws.String("FunctionalTest-SSMSecretsSecretFromASM"),
+ SecretString: aws.String("secretValue"),
+ }
+
+	// create the secret in Secrets Manager if it does not exist
+ _, err := asmClient.CreateSecret(input)
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ switch aerr.Code() {
+ case secretsmanager.ErrCodeResourceExistsException:
+ t.Logf("Secret FunctionalTest-SSMSecretsSecretFromASM already exists in AWS Secrets Manager")
+ default:
+ require.NoError(t, err, "Secrets Manager CreateSecret call failed")
+ }
+ }
+ }
+
+ agent := RunAgent(t, nil)
+ defer agent.Cleanup()
+
+ agent.RequireVersion(">=1.22.0")
+
+ tdOverrides := make(map[string]string)
+ tdOverrides["$$$SECRET_NAME$$$"] = secretName
+ tdOverrides["$$$SSM_PARAMETER_NAME$$$"] = parameterName
+ tdOverrides["$$$EXECUTION_ROLE$$$"] = os.Getenv("ECS_FTS_EXECUTION_ROLE")
+
+ task, err := agent.StartTaskWithTaskDefinitionOverrides(t, "ssmsecrets-environment-variables", tdOverrides)
+ require.NoError(t, err, "Failed to start task for ssmsecrets environment variables")
+
+ err = task.WaitStopped(waitTaskStateChangeDuration)
+ require.NoError(t, err)
+ exitCode, _ := task.ContainerExitcode("ssmsecrets-environment-variables")
+ assert.Equal(t, 42, exitCode, fmt.Sprintf("Expected exit code of 42; got %d", exitCode))
+}
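
All three functional tests drive the same `ssmsecrets-environment-variables` task definition, whose `$$$...$$$` placeholders are filled from the `tdOverrides` map; the container exits 42 only when the injected `SECRET_NAME` variable contains `secretValue`. A minimal sketch of the placeholder substitution assumed to be performed by `StartTaskWithTaskDefinitionOverrides`:

package main

import (
	"fmt"
	"strings"
)

// applyOverrides replaces every $$$PLACEHOLDER$$$ in the task-definition
// template with its override value (assumed behavior of the test helper).
func applyOverrides(template string, overrides map[string]string) string {
	for placeholder, value := range overrides {
		template = strings.Replace(template, placeholder, value, -1)
	}
	return template
}

func main() {
	td := `{"name":"$$$SECRET_NAME$$$","valueFrom":"$$$SSM_PARAMETER_NAME$$$"}`
	out := applyOverrides(td, map[string]string{
		"$$$SECRET_NAME$$$":        "SECRET_NAME",
		"$$$SSM_PARAMETER_NAME$$$": "FunctionalTest-SSMSecretsString",
	})
	fmt.Println(out)
}
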
diff --git a/agent/handlers/task_server_setup.go b/agent/handlers/task_server_setup.go
index c05558ba537..b1b64aa894c 100644
--- a/agent/handlers/task_server_setup.go
+++ b/agent/handlers/task_server_setup.go
@@ -49,7 +49,8 @@ func taskServerSetup(credentialsManager credentials.Manager,
cluster string,
statsEngine stats.Engine,
steadyStateRate int,
- burstRate int) *http.Server {
+ burstRate int,
+ availabilityZone string) *http.Server {
muxRouter := mux.NewRouter()
// Set this so that for request like "/v3//metadata/task", the Agent will pass
@@ -59,9 +60,9 @@ func taskServerSetup(credentialsManager credentials.Manager,
muxRouter.HandleFunc(v1.CredentialsPath,
v1.CredentialsHandler(credentialsManager, auditLogger))
- v2HandlersSetup(muxRouter, state, statsEngine, cluster, credentialsManager, auditLogger)
+ v2HandlersSetup(muxRouter, state, statsEngine, cluster, credentialsManager, auditLogger, availabilityZone)
- v3HandlersSetup(muxRouter, state, statsEngine, cluster)
+ v3HandlersSetup(muxRouter, state, statsEngine, cluster, availabilityZone)
limiter := tollbooth.NewLimiter(int64(steadyStateRate), nil)
limiter.SetOnLimitReached(handlersutils.LimitReachedHandler(auditLogger))
@@ -93,11 +94,12 @@ func v2HandlersSetup(muxRouter *mux.Router,
statsEngine stats.Engine,
cluster string,
credentialsManager credentials.Manager,
- auditLogger audit.AuditLogger) {
+ auditLogger audit.AuditLogger,
+ availabilityZone string) {
muxRouter.HandleFunc(v2.CredentialsPath, v2.CredentialsHandler(credentialsManager, auditLogger))
- muxRouter.HandleFunc(v2.ContainerMetadataPath, v2.TaskContainerMetadataHandler(state, cluster))
- muxRouter.HandleFunc(v2.TaskMetadataPath, v2.TaskContainerMetadataHandler(state, cluster))
- muxRouter.HandleFunc(v2.TaskMetadataPathWithSlash, v2.TaskContainerMetadataHandler(state, cluster))
+ muxRouter.HandleFunc(v2.ContainerMetadataPath, v2.TaskContainerMetadataHandler(state, cluster, availabilityZone))
+ muxRouter.HandleFunc(v2.TaskMetadataPath, v2.TaskContainerMetadataHandler(state, cluster, availabilityZone))
+ muxRouter.HandleFunc(v2.TaskMetadataPathWithSlash, v2.TaskContainerMetadataHandler(state, cluster, availabilityZone))
muxRouter.HandleFunc(v2.ContainerStatsPath, v2.TaskContainerStatsHandler(state, statsEngine))
muxRouter.HandleFunc(v2.TaskStatsPath, v2.TaskContainerStatsHandler(state, statsEngine))
muxRouter.HandleFunc(v2.TaskStatsPathWithSlash, v2.TaskContainerStatsHandler(state, statsEngine))
@@ -107,9 +109,10 @@ func v2HandlersSetup(muxRouter *mux.Router,
func v3HandlersSetup(muxRouter *mux.Router,
state dockerstate.TaskEngineState,
statsEngine stats.Engine,
- cluster string) {
+ cluster string,
+ availabilityZone string) {
muxRouter.HandleFunc(v3.ContainerMetadataPath, v3.ContainerMetadataHandler(state))
- muxRouter.HandleFunc(v3.TaskMetadataPath, v3.TaskMetadataHandler(state, cluster))
+ muxRouter.HandleFunc(v3.TaskMetadataPath, v3.TaskMetadataHandler(state, cluster, availabilityZone))
muxRouter.HandleFunc(v3.ContainerStatsPath, v3.ContainerStatsHandler(state, statsEngine))
muxRouter.HandleFunc(v3.TaskStatsPath, v3.TaskStatsHandler(state, statsEngine))
}
@@ -120,7 +123,8 @@ func ServeTaskHTTPEndpoint(credentialsManager credentials.Manager,
state dockerstate.TaskEngineState,
containerInstanceArn string,
cfg *config.Config,
- statsEngine stats.Engine) {
+ statsEngine stats.Engine,
+ availabilityZone string) {
// Create and initialize the audit log
// TODO Use seelog's programmatic configuration instead of xml.
logger, err := seelog.LoggerFromConfigAsString(audit.AuditLoggerConfig(cfg))
@@ -133,7 +137,7 @@ func ServeTaskHTTPEndpoint(credentialsManager credentials.Manager,
auditLogger := audit.NewAuditLog(containerInstanceArn, cfg, logger)
server := taskServerSetup(credentialsManager, auditLogger, state, cfg.Cluster, statsEngine,
- cfg.TaskMetadataSteadyStateRate, cfg.TaskMetadataBurstRate)
+ cfg.TaskMetadataSteadyStateRate, cfg.TaskMetadataBurstRate, availabilityZone)
for {
utils.RetryWithBackoff(utils.NewSimpleBackoff(time.Second, time.Minute, 0.2, 2), func() error {
diff --git a/agent/handlers/task_server_setup_test.go b/agent/handlers/task_server_setup_test.go
index 472bef82ec0..e81a94f40b5 100644
--- a/agent/handlers/task_server_setup_test.go
+++ b/agent/handlers/task_server_setup_test.go
@@ -73,6 +73,7 @@ const (
v2BaseMetadataPath = "/v2/metadata"
v3BasePath = "/v3/"
v3EndpointID = "v3eid"
+ availabilityzone = "us-west-2b"
)
var (
@@ -166,6 +167,7 @@ var (
PullStartedAt: aws.Time(now.UTC()),
PullStoppedAt: aws.Time(now.UTC()),
ExecutionStoppedAt: aws.Time(now.UTC()),
+ AvailabilityZone: availabilityzone,
}
)
@@ -311,7 +313,7 @@ func testErrorResponsesFromServer(t *testing.T, path string, expectedErrorMessag
credentialsManager := mock_credentials.NewMockManager(ctrl)
auditLog := mock_audit.NewMockAuditLogger(ctrl)
server := taskServerSetup(credentialsManager, auditLog, nil, "", nil, config.DefaultTaskMetadataSteadyStateRate,
- config.DefaultTaskMetadataBurstRate)
+ config.DefaultTaskMetadataBurstRate, "")
recorder := httptest.NewRecorder()
req, _ := http.NewRequest("GET", path, nil)
@@ -345,7 +347,7 @@ func getResponseForCredentialsRequest(t *testing.T, expectedStatus int,
credentialsManager := mock_credentials.NewMockManager(ctrl)
auditLog := mock_audit.NewMockAuditLogger(ctrl)
server := taskServerSetup(credentialsManager, auditLog, nil, "", nil, config.DefaultTaskMetadataSteadyStateRate,
- config.DefaultTaskMetadataBurstRate)
+ config.DefaultTaskMetadataBurstRate, "")
recorder := httptest.NewRecorder()
creds, ok := getCredentials()
@@ -412,7 +414,7 @@ func TestV2TaskMetadata(t *testing.T) {
state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true),
)
server := taskServerSetup(credentials.NewManager(), auditLog, state, clusterName, statsEngine,
- config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate)
+ config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, availabilityzone)
recorder := httptest.NewRecorder()
req, _ := http.NewRequest("GET", tc.path, nil)
req.RemoteAddr = remoteIP + ":" + remotePort
@@ -424,7 +426,6 @@ func TestV2TaskMetadata(t *testing.T) {
err = json.Unmarshal(res, &taskResponse)
assert.NoError(t, err)
assert.Equal(t, expectedTaskResponse, taskResponse)
-
})
}
}
@@ -443,7 +444,7 @@ func TestV2ContainerMetadata(t *testing.T) {
state.EXPECT().TaskByID(containerID).Return(task, true),
)
server := taskServerSetup(credentials.NewManager(), auditLog, state, clusterName, statsEngine,
- config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate)
+ config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "")
recorder := httptest.NewRecorder()
req, _ := http.NewRequest("GET", v2BaseMetadataPath+"/"+containerID, nil)
req.RemoteAddr = remoteIP + ":" + remotePort
@@ -471,7 +472,7 @@ func TestV2ContainerStats(t *testing.T) {
statsEngine.EXPECT().ContainerDockerStats(taskARN, containerID).Return(dockerStats, nil),
)
server := taskServerSetup(credentials.NewManager(), auditLog, state, clusterName, statsEngine,
- config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate)
+ config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "")
recorder := httptest.NewRecorder()
req, _ := http.NewRequest("GET", v2BaseStatsPath+"/"+containerID, nil)
req.RemoteAddr = remoteIP + ":" + remotePort
@@ -518,7 +519,7 @@ func TestV2TaskStats(t *testing.T) {
statsEngine.EXPECT().ContainerDockerStats(taskARN, containerID).Return(dockerStats, nil),
)
server := taskServerSetup(credentials.NewManager(), auditLog, state, clusterName, statsEngine,
- config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate)
+ config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "")
recorder := httptest.NewRecorder()
req, _ := http.NewRequest("GET", tc.path, nil)
req.RemoteAddr = remoteIP + ":" + remotePort
@@ -550,7 +551,7 @@ func TestV3TaskMetadata(t *testing.T) {
state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true),
)
server := taskServerSetup(credentials.NewManager(), auditLog, state, clusterName, statsEngine,
- config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate)
+ config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, availabilityzone)
recorder := httptest.NewRecorder()
req, _ := http.NewRequest("GET", v3BasePath+v3EndpointID+"/task", nil)
server.Handler.ServeHTTP(recorder, req)
@@ -577,7 +578,7 @@ func TestV3ContainerMetadata(t *testing.T) {
state.EXPECT().TaskByID(containerID).Return(task, true),
)
server := taskServerSetup(credentials.NewManager(), auditLog, state, clusterName, statsEngine,
- config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate)
+ config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "")
recorder := httptest.NewRecorder()
req, _ := http.NewRequest("GET", v3BasePath+v3EndpointID, nil)
server.Handler.ServeHTTP(recorder, req)
@@ -612,7 +613,7 @@ func TestV3TaskStats(t *testing.T) {
statsEngine.EXPECT().ContainerDockerStats(taskARN, containerID).Return(dockerStats, nil),
)
server := taskServerSetup(credentials.NewManager(), auditLog, state, clusterName, statsEngine,
- config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate)
+ config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "")
recorder := httptest.NewRecorder()
req, _ := http.NewRequest("GET", v3BasePath+v3EndpointID+"/task/stats", nil)
server.Handler.ServeHTTP(recorder, req)
@@ -643,7 +644,7 @@ func TestV3ContainerStats(t *testing.T) {
statsEngine.EXPECT().ContainerDockerStats(taskARN, containerID).Return(dockerStats, nil),
)
server := taskServerSetup(credentials.NewManager(), auditLog, state, clusterName, statsEngine,
- config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate)
+ config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "")
recorder := httptest.NewRecorder()
req, _ := http.NewRequest("GET", v3BasePath+v3EndpointID+"/stats", nil)
server.Handler.ServeHTTP(recorder, req)
@@ -678,7 +679,7 @@ func TestTaskHTTPEndpointErrorCode404(t *testing.T) {
statsEngine := mock_stats.NewMockEngine(ctrl)
server := taskServerSetup(credentials.NewManager(), auditLog, state, clusterName, statsEngine,
- config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate)
+ config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "")
for _, testPath := range testPaths {
t.Run(fmt.Sprintf("Test path: %s", testPath), func(t *testing.T) {
@@ -718,7 +719,7 @@ func TestTaskHTTPEndpointErrorCode400(t *testing.T) {
statsEngine := mock_stats.NewMockEngine(ctrl)
server := taskServerSetup(credentials.NewManager(), auditLog, state, clusterName, statsEngine,
- config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate)
+ config.DefaultTaskMetadataSteadyStateRate, config.DefaultTaskMetadataBurstRate, "")
for _, testPath := range testPaths {
t.Run(fmt.Sprintf("Test path: %s", testPath), func(t *testing.T) {
diff --git a/agent/handlers/v2/response.go b/agent/handlers/v2/response.go
index 12a3eedb9bf..3ebca165edd 100644
--- a/agent/handlers/v2/response.go
+++ b/agent/handlers/v2/response.go
@@ -40,6 +40,7 @@ type TaskResponse struct {
PullStartedAt *time.Time `json:"PullStartedAt,omitempty"`
PullStoppedAt *time.Time `json:"PullStoppedAt,omitempty"`
ExecutionStoppedAt *time.Time `json:"ExecutionStoppedAt,omitempty"`
+ AvailabilityZone string `json:"AvailabilityZone,omitempty"`
}
// ContainerResponse defines the schema for the container response
@@ -75,19 +76,21 @@ type LimitsResponse struct {
// NewTaskResponse creates a new response object for the task
func NewTaskResponse(taskARN string,
state dockerstate.TaskEngineState,
- cluster string) (*TaskResponse, error) {
+ cluster string,
+ az string) (*TaskResponse, error) {
task, ok := state.TaskByArn(taskARN)
if !ok {
return nil, errors.Errorf("v2 task response: unable to find task '%s'", taskARN)
}
resp := &TaskResponse{
- Cluster: cluster,
- TaskARN: task.Arn,
- Family: task.Family,
- Revision: task.Version,
- DesiredStatus: task.GetDesiredStatus().String(),
- KnownStatus: task.GetKnownStatus().String(),
+ Cluster: cluster,
+ TaskARN: task.Arn,
+ Family: task.Family,
+ Revision: task.Version,
+ DesiredStatus: task.GetDesiredStatus().String(),
+ KnownStatus: task.GetKnownStatus().String(),
+ AvailabilityZone: az,
}
taskCPU := task.CPU
diff --git a/agent/handlers/v2/response_test.go b/agent/handlers/v2/response_test.go
index f149dfa017b..b7bc9df4f6b 100644
--- a/agent/handlers/v2/response_test.go
+++ b/agent/handlers/v2/response_test.go
@@ -34,20 +34,21 @@ import (
)
const (
- taskARN = "t1"
- cluster = "default"
- family = "sleep"
- version = "1"
- containerID = "cid"
- containerName = "sleepy"
- imageName = "busybox"
- imageID = "bUsYbOx"
- cpu = 1024
- memory = 512
- eniIPv4Address = "10.0.0.2"
- volName = "volume1"
- volSource = "/var/lib/volume1"
- volDestination = "/volume"
+ taskARN = "t1"
+ cluster = "default"
+ family = "sleep"
+ version = "1"
+ containerID = "cid"
+ containerName = "sleepy"
+ imageName = "busybox"
+ imageID = "bUsYbOx"
+ cpu = 1024
+ memory = 512
+ eniIPv4Address = "10.0.0.2"
+ volName = "volume1"
+ volSource = "/var/lib/volume1"
+ volDestination = "/volume"
+ availabilityZone = "us-west-2b"
)
func TestTaskResponse(t *testing.T) {
@@ -116,7 +117,7 @@ func TestTaskResponse(t *testing.T) {
state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true),
)
- taskResponse, err := NewTaskResponse(taskARN, state, cluster)
+ taskResponse, err := NewTaskResponse(taskARN, state, cluster, availabilityZone)
assert.NoError(t, err)
_, err = json.Marshal(taskResponse)
assert.NoError(t, err)
@@ -212,12 +213,13 @@ func TestTaskResponseMarshal(t *testing.T) {
defer ctrl.Finish()
expectedTaskResponseMap := map[string]interface{}{
- "Cluster": cluster,
- "TaskARN": taskARN,
- "Family": family,
- "Revision": version,
- "DesiredStatus": "RUNNING",
- "KnownStatus": "RUNNING",
+ "Cluster": cluster,
+ "TaskARN": taskARN,
+ "Family": family,
+ "Revision": version,
+ "DesiredStatus": "RUNNING",
+ "KnownStatus": "RUNNING",
+ "AvailabilityZone": availabilityZone,
"Containers": []interface{}{
map[string]interface{}{
"DockerId": containerID,
@@ -294,7 +296,7 @@ func TestTaskResponseMarshal(t *testing.T) {
state.EXPECT().ContainerMapByArn(taskARN).Return(containerNameToDockerContainer, true),
)
- taskResponse, err := NewTaskResponse(taskARN, state, cluster)
+ taskResponse, err := NewTaskResponse(taskARN, state, cluster, availabilityZone)
assert.NoError(t, err)
taskResponseJSON, err := json.Marshal(taskResponse)
diff --git a/agent/handlers/v2/task_container_metadata_handler.go b/agent/handlers/v2/task_container_metadata_handler.go
index 0ccb670ab2f..23d3afdc01f 100644
--- a/agent/handlers/v2/task_container_metadata_handler.go
+++ b/agent/handlers/v2/task_container_metadata_handler.go
@@ -39,7 +39,7 @@ const (
var ContainerMetadataPath = TaskMetadataPathWithSlash + utils.ConstructMuxVar(metadataContainerIDMuxName, utils.AnythingButEmptyRegEx)
// TaskContainerMetadataHandler returns the handler method for handling task and container metadata requests.
-func TaskContainerMetadataHandler(state dockerstate.TaskEngineState, cluster string) func(http.ResponseWriter, *http.Request) {
+func TaskContainerMetadataHandler(state dockerstate.TaskEngineState, cluster string, az string) func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
taskARN, err := getTaskARNByRequest(r, state)
if err != nil {
@@ -55,7 +55,7 @@ func TaskContainerMetadataHandler(state dockerstate.TaskEngineState, cluster str
}
seelog.Infof("V2 task/container metadata handler: writing response for task '%s'", taskARN)
- WriteTaskMetadataResponse(w, taskARN, cluster, state)
+ WriteTaskMetadataResponse(w, taskARN, cluster, state, az)
}
}
@@ -73,9 +73,9 @@ func WriteContainerMetadataResponse(w http.ResponseWriter, containerID string, s
}
// WriteTaskMetadataResponse writes the task metadata to response writer.
-func WriteTaskMetadataResponse(w http.ResponseWriter, taskARN string, cluster string, state dockerstate.TaskEngineState) {
+func WriteTaskMetadataResponse(w http.ResponseWriter, taskARN string, cluster string, state dockerstate.TaskEngineState, az string) {
// Generate a response for the task
- taskResponse, err := NewTaskResponse(taskARN, state, cluster)
+ taskResponse, err := NewTaskResponse(taskARN, state, cluster, az)
if err != nil {
errResponseJSON, _ := json.Marshal("Unable to generate metadata for task: '" + taskARN + "'")
utils.WriteJSONToResponse(w, http.StatusBadRequest, errResponseJSON, utils.RequestTypeTaskMetadata)
diff --git a/agent/handlers/v3/task_metadata_handler.go b/agent/handlers/v3/task_metadata_handler.go
index a097283df43..f9151690f28 100644
--- a/agent/handlers/v3/task_metadata_handler.go
+++ b/agent/handlers/v3/task_metadata_handler.go
@@ -31,7 +31,7 @@ const v3EndpointIDMuxName = "v3EndpointIDMuxName"
var TaskMetadataPath = "/v3/" + utils.ConstructMuxVar(v3EndpointIDMuxName, utils.AnythingButSlashRegEx) + "/task"
// TaskMetadataHandler returns the handler method for handling task metadata requests.
-func TaskMetadataHandler(state dockerstate.TaskEngineState, cluster string) func(http.ResponseWriter, *http.Request) {
+func TaskMetadataHandler(state dockerstate.TaskEngineState, cluster string, az string) func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
taskARN, err := getTaskARNByRequest(r, state)
if err != nil {
@@ -44,6 +44,6 @@ func TaskMetadataHandler(state dockerstate.TaskEngineState, cluster string) func
seelog.Infof("V3 task metadata handler: writing response for task '%s'", taskARN)
// v3 handler shares the same task metadata response format with v2 handler.
- v2.WriteTaskMetadataResponse(w, taskARN, cluster, state)
+ v2.WriteTaskMetadataResponse(w, taskARN, cluster, state, az)
}
}
diff --git a/agent/metrics/generic_metrics_client.go b/agent/metrics/generic_metrics_client.go
new file mode 100644
index 00000000000..ad613e1c089
--- /dev/null
+++ b/agent/metrics/generic_metrics_client.go
@@ -0,0 +1,136 @@
+// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package metrics
+
+import (
+ "context"
+ "crypto/md5"
+ "fmt"
+ "math/rand"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/cihub/seelog"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ callTimeout = 2 * time.Minute
+)
+
+// A GenericMetricsClient records 3 metrics:
+// 1) A Prometheus summary vector representing call durations for different API calls
+// 2) A durations guage vector that updates the last recorded duration for the API call
+// allowing for a time series view in the Prometheus browser
+// 3) A counter vector that increments call counts for each API call
+// The outstandingCalls map allows Fired CallStarts to be matched with Fired CallEnds
+type GenericMetrics struct {
+ durationVec *prometheus.SummaryVec
+ durations *prometheus.GaugeVec
+ counterVec *prometheus.CounterVec
+ lock sync.RWMutex
+ outstandingCalls map[string]time.Time
+}
+
+func Init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+// This function creates a hash of the callName and a randomly generated number.
+// The hash serves to match the call invokation with the call's end, which is
+// required to compute the call duration. The final metric is observed when the
+// FireCallEnd function is called.
+// If callID is empty, then we initiate the call with FireCallStart
+// We use a channel holding 1 bool to ensure that the FireCallEnd is called AFTER
+// the FireCallStart (because these are done in separate go routines)
+func (gm *GenericMetrics) RecordCall(callID, callName string, callTime time.Time, callStarted chan bool) string {
+ if callID == "" {
+ hashData := []byte("GenericMetrics-" + callName + strconv.FormatFloat(float64(rand.Float32()), 'f', -1, 32))
+ hash := fmt.Sprintf("%x", md5.Sum(hashData))
+ // Go routines are utilized to avoid blocking the main thread in case of
+ // resource contention with var outstandingCalls
+ go gm.FireCallStart(hash, callName, callTime, callStarted)
+ go gm.IncrementCallCount(callName)
+ return hash
+ } else {
+ go gm.FireCallEnd(callID, callName, callTime, callStarted)
+ return ""
+ }
+}
+
+func (gm *GenericMetrics) FireCallStart(callHash, callName string, timestamp time.Time, callStarted chan bool) {
+ gm.lock.Lock()
+ defer gm.lock.Unlock()
+ // Check map to see if call is outstanding, otherwise, store in map
+ if _, found := gm.outstandingCalls[callHash]; !found {
+ gm.outstandingCalls[callHash] = timestamp
+ } else {
+ seelog.Errorf("Call is already outstanding: %s", callName)
+ }
+ callStarted <- true
+}
+
+func (gm *GenericMetrics) FireCallEnd(callHash, callName string, timestamp time.Time, callStarted chan bool) {
+ defer func() {
+ if r := recover(); r != nil {
+ seelog.Errorf("FireCallEnd for %s panicked. Recovering quietly: %s", callName, r)
+ }
+ }()
+ // We will block until the FireCallStart complement has completed
+ ctx, cancel := context.WithTimeout(context.Background(), callTimeout)
+ defer cancel()
+ select {
+ case <-ctx.Done():
+ seelog.Errorf("FireCallEnd timed out with %s", callName)
+ gm.lock.Lock()
+ delete(gm.outstandingCalls, callHash)
+ gm.lock.Unlock()
+ return
+ case <-callStarted:
+ break
+ }
+
+ gm.lock.Lock()
+ defer gm.lock.Unlock()
+ // Check map to see if call is outstanding and calculate duration
+ if timeStart, found := gm.outstandingCalls[callHash]; found {
+ seconds := timestamp.Sub(timeStart)
+ gm.durationVec.WithLabelValues(callName).Observe(seconds.Seconds())
+ gm.durations.WithLabelValues(callName).Set(seconds.Seconds())
+ delete(gm.outstandingCalls, callHash)
+ } else {
+ seelog.Errorf("Call is not outstanding: %s", callName)
+ }
+}
+
+// Simple Timeout function
+func startTimeout(timeout chan bool) {
+ time.Sleep(callTimeout)
+ timeout <- true
+}
+
+// This function increments the call count for a specific API call
+// This is invoked at the API call's start, whereas the duration metrics
+// are updated at the API call's end.
+func (gm *GenericMetrics) IncrementCallCount(callName string) {
+ defer func() {
+ if r := recover(); r != nil {
+ seelog.Errorf("IncrementCallCount for %s panicked. Recovering quietly: %s", callName, r)
+ }
+ }()
+ gm.lock.Lock()
+ defer gm.lock.Unlock()
+ gm.counterVec.WithLabelValues(callName).Inc()
+}
diff --git a/agent/metrics/interface.go b/agent/metrics/interface.go
new file mode 100644
index 00000000000..f2169dfa072
--- /dev/null
+++ b/agent/metrics/interface.go
@@ -0,0 +1,79 @@
+// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package metrics
+
+import (
+ "time"
+
+ "github.com/aws/amazon-ecs-agent/agent/config"
+)
+
+// MetricsClient defines the behavior for any Client that uses the
+// MetricsEngine_ interface to collect metrics for Prometheus.
+// Clients should all have the same type of metrics collected
+// (Durations, counts, etc.)
+// In particular, the API calls we monitor in the Agent code (TaskEngine,
+// Docker, ECSClient, etc.) utilize call durations and counts which are
+// encapsulated in the GenericMetricsClient.
+// This interface is extensible for future clients that require different
+// collection behaviors.
+type MetricsClient interface {
+ // RecordCall is responsible for accepting a call ID, call Name,
+ // and timestamp for the call.
+ // The specific behavior of RecordCall is dependent on the type of client
+ // that is used. It is the responsibility of the MetricsEngine to call the
+ // appropriate RecordCall(...) method.
+ // This method is the defining function for this interface.
+ // We use a channel holding 1 bool to ensure that the FireCallEnd is called AFTER
+ // the FireCallStart (because these are done in separate go routines)
+ RecordCall(string, string, time.Time, chan bool) string
+
+ // In order to record call duration, we must fire a method's start and end
+ // at different times (once at the beginning and once at the end). Ideally,
+ // RecordCall should handle this through an execution and returned function
+ // to be deferred (see docker_client.go for usage examples)
+ FireCallStart(string, string, time.Time, chan bool)
+ FireCallEnd(string, string, time.Time, chan bool)
+
+ // This function will increment the call count. The metric for call duration
+ // should ideally have the accurate call count as well, but the Prometheus
+ // summary metric is updated for count when the FireCallEnd(...) function is
+ // called. When a FireCallStart(...) is called but its FireCallEnd(...) is not,
+ // we will see a discrepancy between the Summary's count and this Gauge count
+ IncrementCallCount(string)
+}
+
+// MetricsEngine_ is an interface that drives metric collection over
+// all existing MetricsClients. The APIType parameter corresponds to
+// the type of MetricsClient RecordCall(...) function that will be used.
+// The MetricsEngine_ should be instantiated at Agent startup time with
+// Agent configurations and context passed as parameters
+type MetricsEngine_ interface {
+ // This init function initializes the Global MetricsEngine variable that
+ // can be accessed throughout the Agent codebase on packages that the
+ // metrics package does not depend on (cyclic dependencies not allowed).
+ MustInit(*config.Config)
+
+ // This function calls a specific APIType's Client's RecordCall(...) method.
+ // As discussed in the comments for MetricsClient, different Clients have
+ // have different RecordCall(...) behaviors. Wrapper functions for this
+ // function should be created like recordGenericMetric(...) in metrics_engine.go
+ // to record 2 metrics (function invokation and deferred function end).
+ recordMetric(APIType, string, string)
+
+ // publishMetrics is used to listen on defined ports on localhost
+ // for incoming requests to expose metrics. An externally run Prometheus
+ // server can then 'scrape' the exposed metrics for collection and storage
+ publishMetrics()
+}
diff --git a/agent/metrics/metrics_engine.go b/agent/metrics/metrics_engine.go
new file mode 100644
index 00000000000..c02da18111a
--- /dev/null
+++ b/agent/metrics/metrics_engine.go
@@ -0,0 +1,157 @@
+// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package metrics
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/aws/amazon-ecs-agent/agent/config"
+ "github.com/cihub/seelog"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+type APIType int32
+type MetricsEngine struct {
+ collection bool
+ cfg *config.Config
+ ctx context.Context
+ Registry *prometheus.Registry
+ managedMetrics map[APIType]MetricsClient
+}
+
+const (
+ DockerAPI APIType = iota
+ TaskEngine
+ StateManager
+ ECSClient
+)
+
+// Maintained list of APIs for which we collect metrics. MetricsClients will be
+// initialized using Factory method when a MetricsEngine is created.
+var (
+ managedAPIs = map[APIType]string{
+ DockerAPI: "Docker_API",
+ TaskEngine: "Task_Engine",
+ StateManager: "State_Manager",
+ ECSClient: "ECS_Client",
+ }
+ MetricsEngineGlobal *MetricsEngine = &MetricsEngine{
+ collection: false,
+ }
+)
+
+// Function called during Agent start up to expose metrics on a local endpoint
+func PublishMetrics() {
+ if MetricsEngineGlobal.collection {
+ MetricsEngineGlobal.publishMetrics()
+ }
+}
+
+// Initializes the Global MetricsEngine used throughout Agent
+// Currently, we use the Prometheus Global Default Registerer, which also collecs
+// basic Go application metrics that we use (like memory usage).
+// In future cases, we can use a custom Prometheus Registry to group metrics.
+// For unit testing purposes, we only focus on API calls and use our own Registry
+func MustInit(cfg *config.Config, registry ...*prometheus.Registry) {
+ if !cfg.PrometheusMetricsEnabled {
+ return
+ }
+ var registryToUse *prometheus.Registry
+ if len(registry) > 0 {
+ registryToUse = registry[0]
+ } else {
+ registryToUse = prometheus.DefaultRegisterer.(*prometheus.Registry)
+ }
+ MetricsEngineGlobal = NewMetricsEngine(cfg, registryToUse)
+ MetricsEngineGlobal.collection = true
+}
+
+// We create a MetricsClient for all managed APIs (APIs for which we will collect
+// metrics)
+func NewMetricsEngine(cfg *config.Config, registry *prometheus.Registry) *MetricsEngine {
+ metricsEngine := &MetricsEngine{
+ cfg: cfg,
+ Registry: registry,
+ managedMetrics: make(map[APIType]MetricsClient),
+ }
+ for managedAPI, _ := range managedAPIs {
+ aClient := NewMetricsClient(managedAPI, metricsEngine.Registry)
+ metricsEngine.managedMetrics[managedAPI] = aClient
+ }
+ return metricsEngine
+}
+
+// Wrapper function that allows APIs to call a single function
+func (engine *MetricsEngine) RecordDockerMetric(callName string) func() {
+ return engine.recordGenericMetric(DockerAPI, callName)
+}
+
+// Wrapper function that allows APIs to call a single function
+func (engine *MetricsEngine) RecordTaskEngineMetric(callName string) func() {
+ return engine.recordGenericMetric(TaskEngine, callName)
+}
+
+// Wrapper function that allows APIs to call a single function
+func (engine *MetricsEngine) RecordStateManagerMetric(callName string) func() {
+ return engine.recordGenericMetric(StateManager, callName)
+}
+
+// Wrapper function that allows APIs to call a single function
+func (engine *MetricsEngine) RecordECSClientMetric(callName string) func() {
+ return engine.recordGenericMetric(ECSClient, callName)
+}
+
+// Records a call's start and returns a function to be deferred.
+// Wrapper functions will use this function for GenericMetricsClients.
+// If Metrics collection is enabled from the cfg, we record a metric with callID
+// as an empty string (signaling a call start), and then return a function to
+// record a second metric with a non-empty callID.
+// We use a channel holding 1 bool to ensure that the FireCallEnd is called AFTER
+// the FireCallStart (because these are done in separate go routines)
+// Recording a metric in an API needs only a wrapper function that supplies the
+// APIType and called using the following format:
+// defer metrics.MetricsEngineGlobal.RecordMetricWrapper(callName)()
+func (engine *MetricsEngine) recordGenericMetric(apiType APIType, callName string) func() {
+ callStarted := make(chan bool, 1)
+ if engine == nil || !engine.collection {
+ return func() {
+ }
+ }
+ callID := engine.recordMetric(apiType, callName, "", callStarted)
+ return func() {
+ engine.recordMetric(apiType, callName, callID, callStarted)
+ }
+}
+
+func (engine *MetricsEngine) recordMetric(apiType APIType, callName, callID string, callStarted chan bool) string {
+ return engine.managedMetrics[apiType].RecordCall(callID, callName, time.Now(), callStarted)
+}
+
+// Function that exposes all Agent Metrics on a given port.
+func (engine *MetricsEngine) publishMetrics() {
+ go func() {
+ // Because we are using the DefaultRegisterer in Prometheus, we can use
+ // the promhttp.Handler() function. In future cases for custom registers,
+ // we can use promhttp.HandlerFor(customRegisterer, promhttp.HandlerOpts{})
+ http.Handle("/metrics", promhttp.Handler())
+ err := http.ListenAndServe(fmt.Sprintf(":%d", config.AgentPrometheusExpositionPort), nil)
+ if err != nil {
+ seelog.Errorf("Error publishing metrics: %s", err.Error())
+ }
+ }()
+}
diff --git a/agent/metrics/metrics_factory.go b/agent/metrics/metrics_factory.go
new file mode 100644
index 00000000000..ec2fd61e7db
--- /dev/null
+++ b/agent/metrics/metrics_factory.go
@@ -0,0 +1,82 @@
+// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package metrics
+
+import (
+ "time"
+
+ "github.com/cihub/seelog"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ AgentNamespace = "AgentMetrics"
+ DockerSubsystem = "DockerAPI"
+ TaskEngineSubsystem = "TaskEngine"
+ StateManagerSubsystem = "StateManager"
+ ECSClientSubsystem = "ECSClient"
+)
+
+// A factory method that enables various MetricsClients to be created.
+func NewMetricsClient(api APIType, registry *prometheus.Registry) MetricsClient {
+ switch api {
+ case DockerAPI:
+ return NewGenericMetricsClient(DockerSubsystem, registry)
+ case TaskEngine:
+ return NewGenericMetricsClient(TaskEngineSubsystem, registry)
+ case StateManager:
+ return NewGenericMetricsClient(StateManagerSubsystem, registry)
+ case ECSClient:
+ return NewGenericMetricsClient(ECSClientSubsystem, registry)
+ default:
+ seelog.Error("Unmanaged MetricsClient cannot be created.")
+ return nil
+ }
+}
+
+func NewGenericMetricsClient(subsystem string, registry *prometheus.Registry) *GenericMetrics {
+ aDurationVec := prometheus.NewSummaryVec(prometheus.SummaryOpts{
+ Namespace: AgentNamespace,
+ Subsystem: subsystem,
+ Name: "duration_seconds",
+ Help: subsystem + " call duration in seconds",
+ Objectives: make(map[float64]float64),
+ }, []string{"Call"})
+ registry.MustRegister(aDurationVec)
+
+ aCounterVec := prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: AgentNamespace,
+ Subsystem: subsystem,
+ Name: "call_count",
+ }, []string{"Call"})
+ registry.MustRegister(aCounterVec)
+
+ aGaugeVec := prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: AgentNamespace,
+ Subsystem: subsystem,
+ Name: "call_duration",
+ Help: subsystem + " call duration in seconds individual",
+ },
+ []string{"Call"})
+ registry.MustRegister(aGaugeVec)
+
+ genericMetrics := &GenericMetrics{
+ durationVec: aDurationVec,
+ counterVec: aCounterVec,
+ durations: aGaugeVec,
+ outstandingCalls: make(map[string]time.Time),
+ }
+ return genericMetrics
+}
diff --git a/agent/metrics/metrics_test.go b/agent/metrics/metrics_test.go
new file mode 100644
index 00000000000..bcce3490d39
--- /dev/null
+++ b/agent/metrics/metrics_test.go
@@ -0,0 +1,225 @@
+// +build linux,unit
+
+// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package metrics
+
+import (
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/aws/amazon-ecs-agent/agent/config"
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/stretchr/testify/assert"
+)
+
+// Create default config for Metrics. PrometheusMetricsEnabled is set to false
+// by default, so it must be turned on
+func getTestConfig() config.Config {
+ cfg := config.DefaultConfig()
+ cfg.PrometheusMetricsEnabled = true
+ return cfg
+}
+
+// Tests if MetricsEngineGlobal variable is initialized and if all managed
+// MetricsClients are initialized
+func TestMetricsEngineInit(t *testing.T) {
+ defer func() {
+ MetricsEngineGlobal = &MetricsEngine{
+ collection: false,
+ }
+ }()
+ cfg := getTestConfig()
+ MustInit(&cfg, prometheus.NewRegistry())
+ assert.NotNil(t, MetricsEngineGlobal)
+ assert.Equal(t, len(MetricsEngineGlobal.managedMetrics), len(managedAPIs))
+}
+
+// Tests if a default config will start Prometheus metrics. Should be disabled
+// by default.
+func TestDisablePrometheusMetrics(t *testing.T) {
+ defer func() {
+ MetricsEngineGlobal = &MetricsEngine{
+ collection: false,
+ }
+ }()
+ cfg := getTestConfig()
+ cfg.PrometheusMetricsEnabled = false
+ MustInit(&cfg, prometheus.NewRegistry())
+ PublishMetrics()
+ assert.False(t, MetricsEngineGlobal.collection)
+}
+
+// Mimicks metric collection of Docker API calls through Go routines. The method
+// call to record a metric is the same used by various clients throughout Agent.
+// We sleep the go routine to simulate "work" being done.
+// We can determine the expected values for the metrics and create a map of them,
+// which will then be used to verify the accuracy of the metrics collected.
+func TestMetricCollection(t *testing.T) {
+ defer func() {
+ MetricsEngineGlobal = &MetricsEngine{
+ collection: false,
+ }
+ }()
+ cfg := getTestConfig()
+ MustInit(&cfg, prometheus.NewRegistry())
+ MetricsEngineGlobal.collection = true
+
+ var DockerMetricSleepTime4 time.Duration = 4 * time.Second
+ var DockerMetricSleepTime2 time.Duration = 2 * time.Second
+
+ var wg sync.WaitGroup
+ wg.Add(40)
+
+ // These Go routines simulate metrics collection
+ go func() {
+ for i := 0; i < 10; i++ {
+ go func() {
+ defer wg.Done()
+ defer MetricsEngineGlobal.RecordDockerMetric("START")()
+ time.Sleep(DockerMetricSleepTime4)
+ }()
+ }
+ }()
+ go func() {
+ for i := 0; i < 10; i++ {
+ go func() {
+ defer wg.Done()
+ defer MetricsEngineGlobal.RecordDockerMetric("START")()
+ time.Sleep(DockerMetricSleepTime2)
+ }()
+ }
+ }()
+ go func() {
+ for i := 0; i < 10; i++ {
+ go func() {
+ defer wg.Done()
+ defer MetricsEngineGlobal.RecordDockerMetric("STOP")()
+ time.Sleep(DockerMetricSleepTime4)
+ }()
+ }
+ }()
+ go func() {
+ for i := 0; i < 10; i++ {
+ go func() {
+ defer wg.Done()
+ defer MetricsEngineGlobal.RecordDockerMetric("STOP")()
+ time.Sleep(DockerMetricSleepTime2)
+ }()
+ }
+ }()
+ time.Sleep(2 * time.Second)
+ wg.Wait()
+
+ // This will gather all collected metrics and store them in a MetricFamily list
+ // All metric families can be printed by looping over this variable using
+ // fmt.Println(proto.MarshalTextString(metricFamilies[n])) where n = index
+ metricFamilies, err := MetricsEngineGlobal.Registry.Gather()
+ assert.NoError(t, err)
+
+ // Here we set up the expectations. These are known values which make verfication
+ // easier.
+ expected := make(metricMap)
+ expected["AgentMetrics_DockerAPI_call_count"] = make(map[string][]interface{})
+ expected["AgentMetrics_DockerAPI_call_count"]["CallSTART"] = []interface{}{
+ "COUNTER",
+ 20.0,
+ }
+ expected["AgentMetrics_DockerAPI_call_count"]["CallSTOP"] = []interface{}{
+ "COUNTER",
+ 20.0,
+ }
+ expected["AgentMetrics_DockerAPI_call_duration"] = make(map[string][]interface{})
+ expected["AgentMetrics_DockerAPI_call_duration"]["CallSTART"] = []interface{}{
+ "GUAGE",
+ 0.0,
+ }
+ expected["AgentMetrics_DockerAPI_call_duration"]["CallSTOP"] = []interface{}{
+ "GUAGE",
+ 0.0,
+ }
+ expected["AgentMetrics_DockerAPI_duration_seconds"] = make(map[string][]interface{})
+ expected["AgentMetrics_DockerAPI_duration_seconds"]["CallSTART"] = []interface{}{
+ "SUMMARY",
+ 3.0,
+ }
+ expected["AgentMetrics_DockerAPI_duration_seconds"]["CallSTOP"] = []interface{}{
+ "SUMMARY",
+ 3.0,
+ }
+ // We will do a simple tree search to verify all metrics in metricsFamilies
+ // are as expected
+ assert.True(t, verifyStats(metricFamilies, expected), "Metrics are not accurate")
+}
+
+// A type for storing a Tree-based map. We map the MetricName to a map of metrics
+// under that name. This second map indexes by MetricLabelName+MetricLabelValue to
+// a slice MetricType and MetricValue.
+//MetricName:metricLabelName+metricLabelValue:[metricType, metricValue]
+type metricMap map[string]map[string][]interface{}
+
+// In order to verify the MetricFamily with the expected metric values, we do a simple
+// tree search to verify that all stats in the MetricFamily coincide with the expected
+// metric values.
+// This method only verifes that all metrics in var metricsReceived are present in
+// var expectedMetrics
+func verifyStats(metricsReceived []*dto.MetricFamily, expectedMetrics metricMap) bool {
+ var threshhold float64 = 0.1 // Maximum threshhold for two metrics being equal
+ for _, metricFamily := range metricsReceived {
+ if metricList, found := expectedMetrics[metricFamily.GetName()]; found {
+ for _, metric := range metricFamily.GetMetric() {
+ if aMetric, found := metricList[metric.GetLabel()[0].GetName()+metric.GetLabel()[0].GetValue()]; found {
+ metricTypeExpected := string(aMetric[0].(string))
+ metricValExpected := float64(aMetric[1].(float64))
+ switch metricTypeExpected {
+ case "GUAGE":
+ continue
+ case "COUNTER":
+ if !compareDiff(metricValExpected, metric.GetCounter().GetValue(), threshhold) {
+ fmt.Printf("Does not match SUMMARY. Expected: %f, Received: %f\n", metricValExpected, metric.GetCounter().GetValue())
+ return false
+ }
+ case "SUMMARY":
+ if !compareDiff(metricValExpected, metric.GetSummary().GetSampleSum()/float64(metric.GetSummary().GetSampleCount()), threshhold) {
+ fmt.Printf("Does not match SUMMARY. Expected: %f, Received: %f\n", metricValExpected, metric.GetSummary().GetSampleSum()/float64(metric.GetSummary().GetSampleCount()))
+ return false
+ }
+ default:
+ fmt.Println("Metric Type not recognized")
+ return false
+ }
+ } else {
+ fmt.Println("MetricLabel Name and Value combo not found")
+ return false
+ }
+ }
+ } else {
+ fmt.Println("MetricName not found")
+ return false
+ }
+ }
+ return true
+}
+
+// Helper function to determine if two values (a and b) are within a percentage of a
+func compareDiff(a, b, deltaMin float64) bool {
+ diff := a - b
+ if diff < 0 {
+ diff = -diff
+ }
+ return diff <= (a * deltaMin)
+}
diff --git a/agent/statemanager/state_manager.go b/agent/statemanager/state_manager.go
index 197884a139d..465fc690991 100644
--- a/agent/statemanager/state_manager.go
+++ b/agent/statemanager/state_manager.go
@@ -27,6 +27,7 @@ import (
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/logger"
+ "github.com/aws/amazon-ecs-agent/agent/metrics"
)
const (
@@ -70,7 +71,10 @@ const (
// 17)
// a) Add 'secrets' field to 'apicontainer.Container'
// b) Add 'ssmsecret' field to 'resources'
- ECSDataVersion = 17
+ // 18)
+ // a) Add 'AvailabilityZone' field to the TaskResponse struct
+ // b) Add 'asmsecret' field to 'resources'
+ ECSDataVersion = 18
// ecsDataFile specifies the filename in the ECS_DATADIR
ecsDataFile = "ecs_agent_data.json"
@@ -192,6 +196,7 @@ func AddSaveable(name string, saveable Saveable) Option {
// Save triggers a save to file, though respects a minimum save interval to wait
// between saves.
func (manager *basicStateManager) Save() error {
+ defer metrics.MetricsEngineGlobal.RecordStateManagerMetric("SAVE")()
manager.saveTimesLock.Lock()
defer manager.saveTimesLock.Unlock()
if time.Since(manager.lastSave) >= minSaveInterval {
diff --git a/agent/statemanager/state_manager_test.go b/agent/statemanager/state_manager_test.go
index 33c86218549..7291487b9ff 100644
--- a/agent/statemanager/state_manager_test.go
+++ b/agent/statemanager/state_manager_test.go
@@ -221,10 +221,10 @@ func TestLoadsDataForPrivateRegistryTask(t *testing.T) {
// verify that the state manager correctly loads ssm secrets related fields in state file
func TestLoadsDataForSecretsTask(t *testing.T) {
- cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v16", "secrets", "ecs_agent_data.json"))
+ cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v17", "secrets", "ecs_agent_data.json"))
require.Nil(t, err, "Failed to set up test")
defer cleanup()
- cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v16", "secrets")}
+ cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v17", "secrets")}
taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil)
var containerInstanceArn, cluster, savedInstanceID string
var sequenceNumber int64
@@ -251,9 +251,78 @@ func TestLoadsDataForSecretsTask(t *testing.T) {
assert.Equal(t, "container_1", container.Name)
assert.NotNil(t, container.Secrets)
secret := container.Secrets[0]
- assert.Equal(t, "ENVIRONMENT_VARIABLES", secret.Type)
+ assert.Equal(t, "ENVIRONMENT_VARIABLE", secret.Type)
assert.Equal(t, "ssm-secret", secret.Name)
assert.Equal(t, "us-west-2", secret.Region)
assert.Equal(t, "secret-value-from", secret.ValueFrom)
assert.Equal(t, "ssm", secret.Provider)
}
+
+func TestLoadsDataForAddingAvailabilityZoneInTask(t *testing.T) {
+ cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v18", "availabilityZone", "ecs_agent_data.json"))
+ require.Nil(t, err, "Failed to set up test")
+ defer cleanup()
+ cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v18", "availabilityZone")}
+ taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil)
+ var containerInstanceArn, cluster, savedInstanceID, availabilityZone string
+ var sequenceNumber int64
+ stateManager, err := statemanager.NewStateManager(cfg,
+ statemanager.AddSaveable("TaskEngine", taskEngine),
+ statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn),
+ statemanager.AddSaveable("Cluster", &cluster),
+ statemanager.AddSaveable("EC2InstanceID", &savedInstanceID),
+ statemanager.AddSaveable("SeqNum", &sequenceNumber),
+ statemanager.AddSaveable("AvailabilityZone", &availabilityZone),
+ )
+ assert.NoError(t, err)
+ err = stateManager.Load()
+ assert.NoError(t, err)
+ assert.Equal(t, "state-file", cluster)
+ assert.EqualValues(t, 0, sequenceNumber)
+ tasks, err := taskEngine.ListTasks()
+ assert.NoError(t, err)
+ assert.Equal(t, 1, len(tasks))
+ task := tasks[0]
+ assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4", task.Arn)
+ assert.Equal(t, 1, len(task.Containers))
+ assert.Equal(t, "us-west-2c", availabilityZone)
+}
+
+// verify that the state manager correctly loads asm secrets related fields in state file
+func TestLoadsDataForASMSecretsTask(t *testing.T) {
+ cleanup, err := setupWindowsTest(filepath.Join(".", "testdata", "v18", "secrets", "ecs_agent_data.json"))
+ require.Nil(t, err, "Failed to set up test")
+ defer cleanup()
+ cfg := &config.Config{DataDir: filepath.Join(".", "testdata", "v18", "secrets")}
+ taskEngine := engine.NewTaskEngine(&config.Config{}, nil, nil, nil, nil, dockerstate.NewTaskEngineState(), nil, nil)
+ var containerInstanceArn, cluster, savedInstanceID string
+ var sequenceNumber int64
+ stateManager, err := statemanager.NewStateManager(cfg,
+ statemanager.AddSaveable("TaskEngine", taskEngine),
+ statemanager.AddSaveable("ContainerInstanceArn", &containerInstanceArn),
+ statemanager.AddSaveable("Cluster", &cluster),
+ statemanager.AddSaveable("EC2InstanceID", &savedInstanceID),
+ statemanager.AddSaveable("SeqNum", &sequenceNumber),
+ )
+ assert.NoError(t, err)
+ err = stateManager.Load()
+ assert.NoError(t, err)
+ assert.Equal(t, "state-file", cluster)
+ assert.EqualValues(t, 0, sequenceNumber)
+ tasks, err := taskEngine.ListTasks()
+ assert.NoError(t, err)
+ assert.Equal(t, 1, len(tasks))
+ task := tasks[0]
+ assert.Equal(t, "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4", task.Arn)
+ assert.Equal(t, "secrets-state", task.Family)
+ assert.Equal(t, 1, len(task.Containers))
+ container := task.Containers[0]
+ assert.Equal(t, "container_1", container.Name)
+ assert.NotNil(t, container.Secrets)
+ secret := container.Secrets[1]
+ assert.Equal(t, "ENVIRONMENT_VARIABLE", secret.Type)
+ assert.Equal(t, "asm-secret", secret.Name)
+ assert.Equal(t, "us-west-2", secret.Region)
+ assert.Equal(t, "secret-value-from", secret.ValueFrom)
+ assert.Equal(t, "asm", secret.Provider)
+}
diff --git a/agent/statemanager/testdata/v17/secrets/ecs_agent_data.json b/agent/statemanager/testdata/v17/secrets/ecs_agent_data.json
new file mode 100644
index 00000000000..16e7cabb908
--- /dev/null
+++ b/agent/statemanager/testdata/v17/secrets/ecs_agent_data.json
@@ -0,0 +1,219 @@
+{
+ "Data": {
+ "Cluster": "state-file",
+ "ContainerInstanceArn": "arn:aws:ecs:us-west-2:1234567890:container-instance/46efd519-df3f-4096-8f34-faebb1747752",
+ "EC2InstanceID": "i-0da29eb1a8a98768b",
+ "TaskEngine": {
+ "Tasks": [
+ {
+ "Arn": "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4",
+ "Family": "secrets-state",
+ "Version": "1",
+ "Containers": [
+ {
+ "Name": "container_1",
+ "Image": "amazonlinux:1",
+ "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e",
+ "Command": [
+ "sleep",
+ "3600"
+ ],
+ "Cpu": 0,
+ "Memory": 512,
+ "Links": null,
+ "volumesFrom": [],
+ "mountPoints": [],
+ "portMappings": [],
+ "Essential": true,
+ "EntryPoint": null,
+ "environment": {},
+ "overrides": {
+ "command": null
+ },
+ "dockerConfig": {
+ "config": "{}",
+ "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}",
+ "version": "1.17"
+ },
+ "registryAuthentication": null,
+ "secrets": [
+ {
+ "name": "ssm-secret",
+ "valueFrom": "secret-value-from",
+ "provider": "ssm",
+ "containerPath": null,
+ "type": "ENVIRONMENT_VARIABLE",
+ "region": "us-west-2"
+ }
+ ],
+ "LogsAuthStrategy": "",
+ "desiredStatus": "RUNNING",
+ "KnownStatus": "RUNNING",
+ "TransitionDependencySet": {
+ "1": {
+ "ContainerDependencies": null,
+ "ResourceDependencies": [
+ {
+ "Name": "cgroup",
+ "RequiredStatus": 1
+ },
+ {
+ "Name": "ssmsecret",
+ "RequiredStatus": 1
+ }
+ ]
+ }
+ },
+ "RunDependencies": null,
+ "IsInternal": "NORMAL",
+ "ApplyingError": {
+ "error": "API error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: toomanyrequests: too many failed login attempts for username or IP address\n",
+ "name": "CannotPullContainerError"
+ },
+ "SentStatus": "RUNNING",
+ "metadataFileUpdated": false,
+ "KnownExitCode": null,
+ "KnownPortBindings": null
+ }
+ ],
+ "resources": {
+ "ssmsecret": [
+ {
+ "taskARN": "/ecs/33425c99-5db7-45fb-8244-bc94d00661e4",
+ "executionCredentialsID": "b1a6ede6-1a9f-4ab3-a02e-bd3e51b11244",
+ "createdAt": "0001-01-01T00:00:00Z",
+ "desiredStatus": "CREATED",
+ "knownStatus": "CREATED",
+ "secretResources": {
+ "us-west-2": [
+ {
+ "name": "ssm-secret",
+ "valueFrom": "secret-value-from",
+ "provider": "ssm",
+ "containerPath": null,
+ "type": "ENVIRONMENT_VARIABLE",
+ "region": "us-west-2"
+ }
+ ]
+ }
+ }
+ ],
+ "cgroup": [
+ {
+ "cgroupRoot": "/ecs/33425c99-5db7-45fb-8244-bc94d00661e4",
+ "cgroupMountPath": "/sys/fs/cgroup",
+ "createdAt": "0001-01-01T00:00:00Z",
+ "desiredStatus": "CREATED",
+ "knownStatus": "CREATED",
+ "resourceSpec": {
+ "cpu": {
+ "shares": 2
+ }
+ }
+ }
+ ]
+ },
+ "volumes": [],
+ "DesiredStatus": "RUNNING",
+ "KnownStatus": "RUNNING",
+ "KnownTime": "2018-10-04T18:05:49.121835686Z",
+ "PullStartedAt": "2018-10-04T18:05:34.359798761Z",
+ "PullStoppedAt": "2018-10-04T18:05:48.445985904Z",
+ "ExecutionStoppedAt": "0001-01-01T00:00:00Z",
+ "SentStatus": "RUNNING",
+ "StartSequenceNumber": 2,
+ "StopSequenceNumber": 0,
+ "executionCredentialsID": "b1a6ede6-1a9f-4ab3-a02e-bd3e51b11244",
+ "ENI": null,
+ "MemoryCPULimitsEnabled": true,
+ "PlatformFields": {}
+ }
+ ],
+ "IdToContainer": {
+ "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956": {
+ "DockerId": "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956",
+ "DockerName": "ecs-private-registry-state-1-container1-a68ef4b6e0fba38d3500",
+ "Container": {
+ "Name": "container_1",
+ "Image": "amazonlinux:1",
+ "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e",
+ "Command": [
+ "sleep",
+ "3600"
+ ],
+ "Cpu": 0,
+ "Memory": 512,
+ "Links": null,
+ "volumesFrom": [],
+ "mountPoints": [],
+ "portMappings": [],
+ "Essential": true,
+ "EntryPoint": null,
+ "environment": {},
+ "overrides": {
+ "command": null
+ },
+ "dockerConfig": {
+ "config": "{}",
+ "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}",
+ "version": "1.17"
+ },
+ "registryAuthentication": null,
+ "LogsAuthStrategy": "",
+ "desiredStatus": "RUNNING",
+ "KnownStatus": "RUNNING",
+ "TransitionDependencySet": {
+ "1": {
+ "ContainerDependencies": null,
+ "ResourceDependencies": [
+ {
+ "Name": "cgroup",
+ "RequiredStatus": 1
+ },
+ {
+ "Name": "ssmsecret",
+ "RequiredStatus": 1
+ },
+ {
+ "Name": "asmsecret",
+ "RequiredStatus": 1
+ }
+ ]
+ }
+ },
+ "RunDependencies": null,
+ "IsInternal": "NORMAL",
+ "ApplyingError": {
+ "error": "API error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: toomanyrequests: too many failed login attempts for username or IP address\n",
+ "name": "CannotPullContainerError"
+ },
+ "SentStatus": "RUNNING",
+ "metadataFileUpdated": false,
+ "KnownExitCode": null,
+ "KnownPortBindings": null
+ }
+ }
+ },
+ "IdToTask": {
+ "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956": "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4"
+ },
+ "ImageStates": [
+ {
+ "Image": {
+ "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e",
+ "Names": [
+ "amazonlinux:1"
+ ],
+ "Size": 165452304
+ },
+ "PulledAt": "2018-10-04T18:05:48.445644088Z",
+ "LastUsedAt": "2018-10-04T18:05:48.445645342Z",
+ "PullSucceeded": false
+ }
+ ],
+ "ENIAttachments": null,
+ "IPToTask": {}
+ }
+ },
+ "Version": 17
+}
diff --git a/agent/statemanager/testdata/v16/secrets/ecs_agent_data.json b/agent/statemanager/testdata/v18/availabilityZone/ecs_agent_data.json
similarity index 99%
rename from agent/statemanager/testdata/v16/secrets/ecs_agent_data.json
rename to agent/statemanager/testdata/v18/availabilityZone/ecs_agent_data.json
index 0fee41c6417..a7e699bf176 100644
--- a/agent/statemanager/testdata/v16/secrets/ecs_agent_data.json
+++ b/agent/statemanager/testdata/v18/availabilityZone/ecs_agent_data.json
@@ -1,5 +1,6 @@
{
"Data": {
+ "AvailabilityZone": "us-west-2c",
"Cluster": "state-file",
"ContainerInstanceArn": "arn:aws:ecs:us-west-2:1234567890:container-instance/46efd519-df3f-4096-8f34-faebb1747752",
"EC2InstanceID": "i-0da29eb1a8a98768b",
@@ -211,5 +212,5 @@
"IPToTask": {}
}
},
- "Version": 16
-}
\ No newline at end of file
+ "Version": 18
+}
diff --git a/agent/statemanager/testdata/v18/secrets/ecs_agent_data.json b/agent/statemanager/testdata/v18/secrets/ecs_agent_data.json
new file mode 100644
index 00000000000..feb2f277edd
--- /dev/null
+++ b/agent/statemanager/testdata/v18/secrets/ecs_agent_data.json
@@ -0,0 +1,252 @@
+{
+ "Data": {
+ "Cluster": "state-file",
+ "ContainerInstanceArn": "arn:aws:ecs:us-west-2:1234567890:container-instance/46efd519-df3f-4096-8f34-faebb1747752",
+ "EC2InstanceID": "i-0da29eb1a8a98768b",
+ "TaskEngine": {
+ "Tasks": [
+ {
+ "Arn": "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4",
+ "Family": "secrets-state",
+ "Version": "1",
+ "Containers": [
+ {
+ "Name": "container_1",
+ "Image": "amazonlinux:1",
+ "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e",
+ "Command": [
+ "sleep",
+ "3600"
+ ],
+ "Cpu": 0,
+ "Memory": 512,
+ "Links": null,
+ "volumesFrom": [],
+ "mountPoints": [],
+ "portMappings": [],
+ "Essential": true,
+ "EntryPoint": null,
+ "environment": {},
+ "overrides": {
+ "command": null
+ },
+ "dockerConfig": {
+ "config": "{}",
+ "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}",
+ "version": "1.17"
+ },
+ "registryAuthentication": null,
+ "secrets": [
+ {
+ "name": "ssm-secret",
+ "valueFrom": "secret-value-from",
+ "provider": "ssm",
+ "containerPath": null,
+ "type": "ENVIRONMENT_VARIABLE",
+ "region": "us-west-2"
+ },
+ {
+ "name": "asm-secret",
+ "valueFrom": "secret-value-from",
+ "provider": "asm",
+ "containerPath": null,
+ "type": "ENVIRONMENT_VARIABLE",
+ "region": "us-west-2"
+ }
+ ],
+ "LogsAuthStrategy": "",
+ "desiredStatus": "RUNNING",
+ "KnownStatus": "RUNNING",
+ "TransitionDependencySet": {
+ "1": {
+ "ContainerDependencies": null,
+ "ResourceDependencies": [
+ {
+ "Name": "cgroup",
+ "RequiredStatus": 1
+ },
+ {
+ "Name": "ssmsecret",
+ "RequiredStatus": 1
+ },
+ {
+ "Name": "asmsecret",
+ "RequiredStatus": 1
+ }
+ ]
+ }
+ },
+ "RunDependencies": null,
+ "IsInternal": "NORMAL",
+ "ApplyingError": {
+ "error": "API error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: toomanyrequests: too many failed login attempts for username or IP address\n",
+ "name": "CannotPullContainerError"
+ },
+ "SentStatus": "RUNNING",
+ "metadataFileUpdated": false,
+ "KnownExitCode": null,
+ "KnownPortBindings": null
+ }
+ ],
+ "resources": {
+ "asmsecret": [
+ {
+ "taskARN": "/ecs/33425c99-5db7-45fb-8244-bc94d00661e4",
+ "executionCredentialsID": "b1a6ede6-1a9f-4ab3-a02e-bd3e51b11244",
+ "createdAt": "0001-01-01T00:00:00Z",
+ "desiredStatus": "CREATED",
+ "knownStatus": "CREATED",
+ "secretResources": {
+ "secret-value-from_us-west-2": [
+ {
+ "name": "asm-secret",
+ "valueFrom": "secret-value-from",
+ "provider": "asm",
+ "containerPath": null,
+ "type": "ENVIRONMENT_VARIABLE",
+ "region": "us-west-2"
+ }
+ ]
+ }
+ }
+ ],
+ "ssmsecret": [
+ {
+ "taskARN": "/ecs/33425c99-5db7-45fb-8244-bc94d00661e4",
+ "executionCredentialsID": "b1a6ede6-1a9f-4ab3-a02e-bd3e51b11244",
+ "createdAt": "0001-01-01T00:00:00Z",
+ "desiredStatus": "CREATED",
+ "knownStatus": "CREATED",
+ "secretResources": {
+ "us-west-2": [
+ {
+ "name": "ssm-secret",
+ "valueFrom": "secret-value-from",
+ "provider": "ssm",
+ "containerPath": null,
+ "type": "ENVIRONMENT_VARIABLE",
+ "region": "us-west-2"
+ }
+ ]
+ }
+ }
+ ],
+ "cgroup": [
+ {
+ "cgroupRoot": "/ecs/33425c99-5db7-45fb-8244-bc94d00661e4",
+ "cgroupMountPath": "/sys/fs/cgroup",
+ "createdAt": "0001-01-01T00:00:00Z",
+ "desiredStatus": "CREATED",
+ "knownStatus": "CREATED",
+ "resourceSpec": {
+ "cpu": {
+ "shares": 2
+ }
+ }
+ }
+ ]
+ },
+ "volumes": [],
+ "DesiredStatus": "RUNNING",
+ "KnownStatus": "RUNNING",
+ "KnownTime": "2018-10-04T18:05:49.121835686Z",
+ "PullStartedAt": "2018-10-04T18:05:34.359798761Z",
+ "PullStoppedAt": "2018-10-04T18:05:48.445985904Z",
+ "ExecutionStoppedAt": "0001-01-01T00:00:00Z",
+ "SentStatus": "RUNNING",
+ "StartSequenceNumber": 2,
+ "StopSequenceNumber": 0,
+ "executionCredentialsID": "b1a6ede6-1a9f-4ab3-a02e-bd3e51b11244",
+ "ENI": null,
+ "MemoryCPULimitsEnabled": true,
+ "PlatformFields": {}
+ }
+ ],
+ "IdToContainer": {
+ "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956": {
+ "DockerId": "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956",
+ "DockerName": "ecs-private-registry-state-1-container1-a68ef4b6e0fba38d3500",
+ "Container": {
+ "Name": "container_1",
+ "Image": "amazonlinux:1",
+ "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e",
+ "Command": [
+ "sleep",
+ "3600"
+ ],
+ "Cpu": 0,
+ "Memory": 512,
+ "Links": null,
+ "volumesFrom": [],
+ "mountPoints": [],
+ "portMappings": [],
+ "Essential": true,
+ "EntryPoint": null,
+ "environment": {},
+ "overrides": {
+ "command": null
+ },
+ "dockerConfig": {
+ "config": "{}",
+ "hostConfig": "{\"CapAdd\":[],\"CapDrop\":[]}",
+ "version": "1.17"
+ },
+ "registryAuthentication": null,
+ "LogsAuthStrategy": "",
+ "desiredStatus": "RUNNING",
+ "KnownStatus": "RUNNING",
+ "TransitionDependencySet": {
+ "1": {
+ "ContainerDependencies": null,
+ "ResourceDependencies": [
+ {
+ "Name": "cgroup",
+ "RequiredStatus": 1
+ },
+ {
+ "Name": "ssmsecret",
+ "RequiredStatus": 1
+ },
+ {
+ "Name": "asmsecret",
+ "RequiredStatus": 1
+ }
+ ]
+ }
+ },
+ "RunDependencies": null,
+ "IsInternal": "NORMAL",
+ "ApplyingError": {
+ "error": "API error (500): Get https://registry-1.docker.io/v2/library/amazonlinux/manifests/1: toomanyrequests: too many failed login attempts for username or IP address\n",
+ "name": "CannotPullContainerError"
+ },
+ "SentStatus": "RUNNING",
+ "metadataFileUpdated": false,
+ "KnownExitCode": null,
+ "KnownPortBindings": null
+ }
+ }
+ },
+ "IdToTask": {
+ "8f5e6e3091f221c876103289ddabcbcdeb64acd7ac7e2d0cf4da2be2be9d8956": "arn:aws:ecs:us-west-2:1234567890:task/33425c99-5db7-45fb-8244-bc94d00661e4"
+ },
+ "ImageStates": [
+ {
+ "Image": {
+ "ImageID": "sha256:7f929d2604c7e504a568eac9a2523c1b9e9b15e1fcee4076e1411a552913d08e",
+ "Names": [
+ "amazonlinux:1"
+ ],
+ "Size": 165452304
+ },
+ "PulledAt": "2018-10-04T18:05:48.445644088Z",
+ "LastUsedAt": "2018-10-04T18:05:48.445645342Z",
+ "PullSucceeded": false
+ }
+ ],
+ "ENIAttachments": null,
+ "IPToTask": {}
+ }
+ },
+ "Version": 18
+}
diff --git a/agent/taskresource/asmsecret/asmsecret.go b/agent/taskresource/asmsecret/asmsecret.go
new file mode 100644
index 00000000000..a495308ae7c
--- /dev/null
+++ b/agent/taskresource/asmsecret/asmsecret.go
@@ -0,0 +1,438 @@
+// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package asmsecret
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
+	"github.com/aws/amazon-ecs-agent/agent/api/task/status"
+	"github.com/aws/amazon-ecs-agent/agent/asm"
+	"github.com/aws/amazon-ecs-agent/agent/asm/factory"
+	"github.com/aws/amazon-ecs-agent/agent/credentials"
+	"github.com/aws/amazon-ecs-agent/agent/taskresource"
+	resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status"
+
+	"github.com/cihub/seelog"
+	"github.com/pkg/errors"
+)
+
+const (
+ // ResourceName is the name of the asmsecret resource
+ ResourceName = "asmsecret"
+)
+
+// ASMSecretResource represents secrets as a task resource.
+// The secrets are stored in AWS Secrets Manager.
+type ASMSecretResource struct {
+ taskARN string
+ createdAt time.Time
+ desiredStatusUnsafe resourcestatus.ResourceStatus
+ knownStatusUnsafe resourcestatus.ResourceStatus
+ // appliedStatus is the status that has been "applied" (e.g., we've called some
+ // operation such as 'Create' on the resource) but we don't yet know that the
+ // application was successful, which may then change the known status. This is
+ // used while progressing resource states in progressTask() of task manager
+ appliedStatus resourcestatus.ResourceStatus
+ resourceStatusToTransitionFunction map[resourcestatus.ResourceStatus]func() error
+ credentialsManager credentials.Manager
+ executionCredentialsID string
+
+	// requiredSecrets stores all of the task's deduplicated ASM secrets;
+	// the key is a combination of the secret's valueFrom and region
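+	// (for example, valueFrom "my-secret" in region us-west-2 yields the
+	// cache key "my-secret_us-west-2")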
+ requiredSecrets map[string]apicontainer.Secret
+	// secretData caches the retrieved secret values, keyed the same way
+ secretData map[string]string
+
+	// asmClientCreator is a factory interface that creates new ASM clients.
+	// This is needed mostly for testing.
+ asmClientCreator factory.ClientCreator
+
+ // terminalReason should be set for resource creation failures. This ensures
+ // the resource object carries some context for why provisioning failed.
+ terminalReason string
+ terminalReasonOnce sync.Once
+
+ // lock is used for fields that are accessed and updated concurrently
+ lock sync.RWMutex
+}
+
+// NewASMSecretResource creates a new ASMSecretResource object
+func NewASMSecretResource(taskARN string,
+ asmSecrets map[string]apicontainer.Secret,
+ executionCredentialsID string,
+ credentialsManager credentials.Manager,
+ asmClientCreator factory.ClientCreator) *ASMSecretResource {
+
+ s := &ASMSecretResource{
+ taskARN: taskARN,
+ requiredSecrets: asmSecrets,
+ credentialsManager: credentialsManager,
+ executionCredentialsID: executionCredentialsID,
+ asmClientCreator: asmClientCreator,
+ }
+
+ s.initStatusToTransition()
+ return s
+}
+
+func (secret *ASMSecretResource) initStatusToTransition() {
+ resourceStatusToTransitionFunction := map[resourcestatus.ResourceStatus]func() error{
+ resourcestatus.ResourceStatus(ASMSecretCreated): secret.Create,
+ }
+ secret.resourceStatusToTransitionFunction = resourceStatusToTransitionFunction
+}
+
+func (secret *ASMSecretResource) setTerminalReason(reason string) {
+ secret.terminalReasonOnce.Do(func() {
+ seelog.Infof("ASM secret resource: setting terminal reason for asm secret resource in task: [%s]", secret.taskARN)
+ secret.terminalReason = reason
+ })
+}
+
+// GetTerminalReason returns an error string to propagate up through the task
+// state change messages
+func (secret *ASMSecretResource) GetTerminalReason() string {
+ return secret.terminalReason
+}
+
+// SetDesiredStatus safely sets the desired status of the resource
+func (secret *ASMSecretResource) SetDesiredStatus(status resourcestatus.ResourceStatus) {
+ secret.lock.Lock()
+ defer secret.lock.Unlock()
+
+ secret.desiredStatusUnsafe = status
+}
+
+// GetDesiredStatus safely returns the desired status of the resource
+func (secret *ASMSecretResource) GetDesiredStatus() resourcestatus.ResourceStatus {
+ secret.lock.RLock()
+ defer secret.lock.RUnlock()
+
+ return secret.desiredStatusUnsafe
+}
+
+// GetName safely returns the name of the resource
+func (secret *ASMSecretResource) GetName() string {
+ secret.lock.RLock()
+ defer secret.lock.RUnlock()
+
+ return ResourceName
+}
+
+// DesiredTerminal returns true if the secret's desired status is REMOVED
+func (secret *ASMSecretResource) DesiredTerminal() bool {
+ secret.lock.RLock()
+ defer secret.lock.RUnlock()
+
+ return secret.desiredStatusUnsafe == resourcestatus.ResourceStatus(ASMSecretRemoved)
+}
+
+// KnownCreated returns true if the secret's known status is CREATED
+func (secret *ASMSecretResource) KnownCreated() bool {
+ secret.lock.RLock()
+ defer secret.lock.RUnlock()
+
+ return secret.knownStatusUnsafe == resourcestatus.ResourceStatus(ASMSecretCreated)
+}
+
+// TerminalStatus returns the last transition state of asmsecret
+func (secret *ASMSecretResource) TerminalStatus() resourcestatus.ResourceStatus {
+ return resourcestatus.ResourceStatus(ASMSecretRemoved)
+}
+
+// NextKnownState returns the state that the resource should
+// progress to based on its `KnownState`.
+func (secret *ASMSecretResource) NextKnownState() resourcestatus.ResourceStatus {
+ return secret.GetKnownStatus() + 1
+}
+
+// ApplyTransition calls the function required to move to the specified status
+func (secret *ASMSecretResource) ApplyTransition(nextState resourcestatus.ResourceStatus) error {
+ transitionFunc, ok := secret.resourceStatusToTransitionFunction[nextState]
+ if !ok {
+ return errors.Errorf("resource [%s]: transition to %s impossible", secret.GetName(),
+ secret.StatusString(nextState))
+ }
+ return transitionFunc()
+}
+
+// SteadyState returns the transition state of the resource defined as "ready"
+func (secret *ASMSecretResource) SteadyState() resourcestatus.ResourceStatus {
+ return resourcestatus.ResourceStatus(ASMSecretCreated)
+}
+
+// SetKnownStatus safely sets the currently known status of the resource
+func (secret *ASMSecretResource) SetKnownStatus(status resourcestatus.ResourceStatus) {
+ secret.lock.Lock()
+ defer secret.lock.Unlock()
+
+ secret.knownStatusUnsafe = status
+ secret.updateAppliedStatusUnsafe(status)
+}
+
+// updateAppliedStatusUnsafe updates the resource transitioning status
+func (secret *ASMSecretResource) updateAppliedStatusUnsafe(knownStatus resourcestatus.ResourceStatus) {
+ if secret.appliedStatus == resourcestatus.ResourceStatus(ASMSecretStatusNone) {
+ return
+ }
+
+ // Check if the resource transition has already finished
+ if secret.appliedStatus <= knownStatus {
+ secret.appliedStatus = resourcestatus.ResourceStatus(ASMSecretStatusNone)
+ }
+}
+
+// SetAppliedStatus sets the applied status of the resource and returns true if
+// the set succeeded, i.e. the resource was not already mid-transition
+func (secret *ASMSecretResource) SetAppliedStatus(status resourcestatus.ResourceStatus) bool {
+ secret.lock.Lock()
+ defer secret.lock.Unlock()
+
+ if secret.appliedStatus != resourcestatus.ResourceStatus(ASMSecretStatusNone) {
+ // return false to indicate the set operation failed
+ return false
+ }
+
+ secret.appliedStatus = status
+ return true
+}
+
+// GetKnownStatus safely returns the currently known status of the resource
+func (secret *ASMSecretResource) GetKnownStatus() resourcestatus.ResourceStatus {
+ secret.lock.RLock()
+ defer secret.lock.RUnlock()
+
+ return secret.knownStatusUnsafe
+}
+
+// StatusString returns the string representation of the asmsecret resource status
+func (secret *ASMSecretResource) StatusString(status resourcestatus.ResourceStatus) string {
+ return ASMSecretStatus(status).String()
+}
+
+// SetCreatedAt sets the timestamp for resource's creation time
+func (secret *ASMSecretResource) SetCreatedAt(createdAt time.Time) {
+ if createdAt.IsZero() {
+ return
+ }
+ secret.lock.Lock()
+ defer secret.lock.Unlock()
+
+ secret.createdAt = createdAt
+}
+
+// GetCreatedAt returns the timestamp for the resource's creation time
+func (secret *ASMSecretResource) GetCreatedAt() time.Time {
+ secret.lock.RLock()
+ defer secret.lock.RUnlock()
+
+ return secret.createdAt
+}
+
+// Create fetches the secret values from AWS Secrets Manager for the task. It
+// spins up multiple goroutines in order to retrieve values in parallel.
+func (secret *ASMSecretResource) Create() error {
+
+ // To fail fast, check execution role first
+ executionCredentials, ok := secret.credentialsManager.GetTaskCredentials(secret.getExecutionCredentialsID())
+ if !ok {
+ // No need to log here. managedTask.applyResourceState already does that
+ err := errors.New("ASM secret resource: unable to find execution role credentials")
+ secret.setTerminalReason(err.Error())
+ return err
+ }
+ iamCredentials := executionCredentials.GetIAMRoleCredentials()
+
+ var wg sync.WaitGroup
+
+	// Buffer the error channel so that each goroutine can report at most one
+	// error without blocking
+ errorEvents := make(chan error, len(secret.requiredSecrets))
+
+ seelog.Infof("ASM secret resource: retrieving secrets for containers in task: [%s]", secret.taskARN)
+ secret.secretData = make(map[string]string)
+
+ for _, asmsecret := range secret.getRequiredSecrets() {
+ wg.Add(1)
+ // Spin up goroutine per secret to speed up processing time
+ go secret.retrieveASMSecretValue(asmsecret, iamCredentials, &wg, errorEvents)
+ }
+
+ wg.Wait()
+ close(errorEvents)
+
+ if len(errorEvents) > 0 {
+ var terminalReasons []string
+ for err := range errorEvents {
+ terminalReasons = append(terminalReasons, err.Error())
+ }
+
+ errorString := strings.Join(terminalReasons, ";")
+ secret.setTerminalReason(errorString)
+ return errors.New(errorString)
+ }
+ return nil
+}
+
+// retrieveASMSecretValue calls GetSecretFromASM to fetch the secret value from
+// AWS Secrets Manager and stores it in the secretData cache
+func (secret *ASMSecretResource) retrieveASMSecretValue(apiSecret apicontainer.Secret, iamCredentials credentials.IAMRoleCredentials, wg *sync.WaitGroup, errorEvents chan error) {
+ defer wg.Done()
+
+ asmClient := secret.asmClientCreator.NewASMClient(apiSecret.Region, iamCredentials)
+ seelog.Infof("ASM secret resource: retrieving resource for secret %v in region %s for task: [%s]", apiSecret.ValueFrom, apiSecret.Region, secret.taskARN)
+	// For an ASM secret, ValueFrom can be either the secret's ARN or its name
+ secretValue, err := asm.GetSecretFromASM(apiSecret.ValueFrom, asmClient)
+ if err != nil {
+ errorEvents <- fmt.Errorf("fetching secret data from AWS Secrets Manager in region %s: %v", apiSecret.Region, err)
+ return
+ }
+
+ secret.lock.Lock()
+ defer secret.lock.Unlock()
+
+ // put secret value in secretData
+ secretKey := apiSecret.GetSecretResourceCacheKey()
+ secret.secretData[secretKey] = secretValue
+}
+
+// getRequiredSecrets returns the requiredSecrets field of the asmsecret task resource
+func (secret *ASMSecretResource) getRequiredSecrets() map[string]apicontainer.Secret {
+ secret.lock.RLock()
+ defer secret.lock.RUnlock()
+
+ return secret.requiredSecrets
+}
+
+// getExecutionCredentialsID returns the execution role's credential ID
+func (secret *ASMSecretResource) getExecutionCredentialsID() string {
+ secret.lock.RLock()
+ defer secret.lock.RUnlock()
+
+ return secret.executionCredentialsID
+}
+
+// Cleanup removes the secret values created for the task
+func (secret *ASMSecretResource) Cleanup() error {
+ secret.clearASMSecretValue()
+ return nil
+}
+
+// clearASMSecretValue cycles through the secret value data and removes each
+// entry from the task
+func (secret *ASMSecretResource) clearASMSecretValue() {
+ secret.lock.Lock()
+ defer secret.lock.Unlock()
+
+ for key := range secret.secretData {
+ delete(secret.secretData, key)
+ }
+}
+
+// GetCachedSecretValue retrieves the secret value from secretData field
+func (secret *ASMSecretResource) GetCachedSecretValue(secretKey string) (string, bool) {
+ secret.lock.RLock()
+ defer secret.lock.RUnlock()
+
+ s, ok := secret.secretData[secretKey]
+ return s, ok
+}
+
+// SetCachedSecretValue sets the secret value in the secretData field given the key and value
+func (secret *ASMSecretResource) SetCachedSecretValue(secretKey string, secretValue string) {
+ secret.lock.Lock()
+ defer secret.lock.Unlock()
+
+ if secret.secretData == nil {
+ secret.secretData = make(map[string]string)
+ }
+
+ secret.secretData[secretKey] = secretValue
+}
+
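+// Initialize initializes the transition-function map and the fields that are
+// not serialized to the agent state file, such as the credentials manager and
+// the ASM client creator, and resets the resource's known status when the task
+// has not started yet so that the secret values are fetched again.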
+func (secret *ASMSecretResource) Initialize(resourceFields *taskresource.ResourceFields,
+ taskKnownStatus status.TaskStatus,
+ taskDesiredStatus status.TaskStatus) {
+ secret.initStatusToTransition()
+ secret.credentialsManager = resourceFields.CredentialsManager
+ secret.asmClientCreator = resourceFields.ASMClientCreator
+
+	// If the task hasn't reached the 'created' status and its desired status is
+	// 'running', the resource status needs to be reset to 'NONE' so that the
+	// secret value will be retrieved again
+ if taskKnownStatus < status.TaskCreated &&
+ taskDesiredStatus <= status.TaskRunning {
+ secret.SetKnownStatus(resourcestatus.ResourceStatusNone)
+ }
+}
+
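+// ASMSecretResourceJSON is the JSON representation of an ASMSecretResource,
+// used when saving to and restoring from the agent state file.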
+type ASMSecretResourceJSON struct {
+ TaskARN string `json:"taskARN"`
+ CreatedAt *time.Time `json:"createdAt,omitempty"`
+ DesiredStatus *ASMSecretStatus `json:"desiredStatus"`
+ KnownStatus *ASMSecretStatus `json:"knownStatus"`
+ RequiredSecrets map[string]apicontainer.Secret `json:"secretResources"`
+ ExecutionCredentialsID string `json:"executionCredentialsID"`
+}
+
+// MarshalJSON serialises the ASMSecretResource struct to JSON
+func (secret *ASMSecretResource) MarshalJSON() ([]byte, error) {
+ if secret == nil {
+ return nil, errors.New("asmsecret resource is nil")
+ }
+ createdAt := secret.GetCreatedAt()
+ return json.Marshal(ASMSecretResourceJSON{
+ TaskARN: secret.taskARN,
+ CreatedAt: &createdAt,
+ DesiredStatus: func() *ASMSecretStatus {
+ desiredState := secret.GetDesiredStatus()
+ s := ASMSecretStatus(desiredState)
+ return &s
+ }(),
+ KnownStatus: func() *ASMSecretStatus {
+ knownState := secret.GetKnownStatus()
+ s := ASMSecretStatus(knownState)
+ return &s
+ }(),
+ RequiredSecrets: secret.getRequiredSecrets(),
+ ExecutionCredentialsID: secret.getExecutionCredentialsID(),
+ })
+}
+
+// UnmarshalJSON deserialises the raw JSON to an ASMSecretResource struct
+func (secret *ASMSecretResource) UnmarshalJSON(b []byte) error {
+ temp := ASMSecretResourceJSON{}
+
+ if err := json.Unmarshal(b, &temp); err != nil {
+ return err
+ }
+
+ if temp.DesiredStatus != nil {
+ secret.SetDesiredStatus(resourcestatus.ResourceStatus(*temp.DesiredStatus))
+ }
+ if temp.KnownStatus != nil {
+ secret.SetKnownStatus(resourcestatus.ResourceStatus(*temp.KnownStatus))
+ }
+ if temp.CreatedAt != nil && !temp.CreatedAt.IsZero() {
+ secret.SetCreatedAt(*temp.CreatedAt)
+ }
+ if temp.RequiredSecrets != nil {
+ secret.requiredSecrets = temp.RequiredSecrets
+ }
+ secret.taskARN = temp.TaskARN
+ secret.executionCredentialsID = temp.ExecutionCredentialsID
+
+ return nil
+}
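For reference, a minimal usage sketch of the new resource: construct it, run Create, then read values back out of the cache. The credentials manager and ASM client factory constructors shown here are assumptions modeled on the agent's existing factories, and the secret name and task ARN are illustrative only.

```go
package main

import (
	"fmt"

	apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
	asmfactory "github.com/aws/amazon-ecs-agent/agent/asm/factory"
	"github.com/aws/amazon-ecs-agent/agent/credentials"
	"github.com/aws/amazon-ecs-agent/agent/taskresource/asmsecret"
)

func main() {
	// Secrets are keyed by valueFrom + "_" + region, matching
	// GetSecretResourceCacheKey on apicontainer.Secret.
	secrets := map[string]apicontainer.Secret{
		"my-secret_us-west-2": {
			Name:      "asm-secret",
			ValueFrom: "my-secret",
			Region:    "us-west-2",
			Provider:  "asm",
		},
	}

	res := asmsecret.NewASMSecretResource(
		"arn:aws:ecs:us-west-2:1234567890:task/example", // illustrative task ARN
		secrets,
		"exec-creds-id",               // must resolve to valid execution role credentials
		credentials.NewManager(),      // assumed constructor for the credentials manager
		asmfactory.NewClientCreator(), // assumed constructor for the ASM client factory
	)

	// Create fans out one goroutine per secret and caches the values.
	if err := res.Create(); err != nil {
		fmt.Println("create failed:", err)
		return
	}
	if v, ok := res.GetCachedSecretValue("my-secret_us-west-2"); ok {
		fmt.Printf("fetched secret (%d bytes)\n", len(v))
	}
}
```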
diff --git a/agent/taskresource/asmsecret/asmsecret_test.go b/agent/taskresource/asmsecret/asmsecret_test.go
new file mode 100644
index 00000000000..98ca445567c
--- /dev/null
+++ b/agent/taskresource/asmsecret/asmsecret_test.go
@@ -0,0 +1,270 @@
+// +build unit
+
+// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package asmsecret
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "testing"
+ "time"
+
+ apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
+ apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status"
+ "github.com/aws/amazon-ecs-agent/agent/asm/factory/mocks"
+ "github.com/aws/amazon-ecs-agent/agent/asm/mocks"
+ "github.com/aws/amazon-ecs-agent/agent/credentials"
+ "github.com/aws/amazon-ecs-agent/agent/credentials/mocks"
+ "github.com/aws/amazon-ecs-agent/agent/taskresource"
+ resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/secretsmanager"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ executionCredentialsID = "exec-creds-id"
+ region1 = "us-west-2"
+ region2 = "us-east-1"
+ secretName1 = "db_username_1"
+ secretName2 = "db_username_2"
+ valueFrom1 = "secret-name"
+ secretKeyWest1 = "secret-name_us-west-2"
+ secretKeyEast1 = "secret-name_us-east-1"
+ secretValue = "secret-value"
+ taskARN = "task1"
+)
+
+func TestCreateWithMultipleASMCall(t *testing.T) {
+ requiredSecretData := map[string]apicontainer.Secret{
+ secretKeyWest1: {
+ Name: secretName1,
+ ValueFrom: valueFrom1,
+ Region: region1,
+ Provider: "asm",
+ },
+ secretKeyEast1: {
+ Name: secretName2,
+ ValueFrom: valueFrom1,
+ Region: region2,
+ Provider: "asm",
+ },
+ }
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ credentialsManager := mock_credentials.NewMockManager(ctrl)
+ asmClientCreator := mock_factory.NewMockClientCreator(ctrl)
+ mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl)
+
+ iamRoleCreds := credentials.IAMRoleCredentials{}
+ creds := credentials.TaskIAMRoleCredentials{
+ IAMRoleCredentials: iamRoleCreds,
+ }
+
+ asmSecretValue := &secretsmanager.GetSecretValueOutput{
+ SecretString: aws.String(secretValue),
+ }
+
+ credentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true)
+ asmClientCreator.EXPECT().NewASMClient(region1, iamRoleCreds).Return(mockASMClient)
+ asmClientCreator.EXPECT().NewASMClient(region2, iamRoleCreds).Return(mockASMClient)
+ mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Do(func(in *secretsmanager.GetSecretValueInput) {
+ assert.Equal(t, aws.StringValue(in.SecretId), valueFrom1)
+ }).Return(asmSecretValue, nil).Times(2)
+
+ asmRes := &ASMSecretResource{
+ executionCredentialsID: executionCredentialsID,
+ requiredSecrets: requiredSecretData,
+ credentialsManager: credentialsManager,
+ asmClientCreator: asmClientCreator,
+ }
+ require.NoError(t, asmRes.Create())
+
+ value1, ok := asmRes.GetCachedSecretValue(secretKeyWest1)
+ require.True(t, ok)
+ assert.Equal(t, secretValue, value1)
+
+ value2, ok := asmRes.GetCachedSecretValue(secretKeyEast1)
+ require.True(t, ok)
+ assert.Equal(t, secretValue, value2)
+}
+
+func TestCreateReturnMultipleErrors(t *testing.T) {
+
+ requiredSecretData := map[string]apicontainer.Secret{
+ secretKeyWest1: {
+ Name: secretName1,
+ ValueFrom: valueFrom1,
+ Region: region1,
+ Provider: "asm",
+ },
+ secretKeyEast1: {
+ Name: secretName2,
+ ValueFrom: valueFrom1,
+ Region: region2,
+ Provider: "asm",
+ },
+ }
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ credentialsManager := mock_credentials.NewMockManager(ctrl)
+ asmClientCreator := mock_factory.NewMockClientCreator(ctrl)
+ mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl)
+
+ iamRoleCreds := credentials.IAMRoleCredentials{}
+ creds := credentials.TaskIAMRoleCredentials{
+ IAMRoleCredentials: iamRoleCreds,
+ }
+
+ asmSecretValue := &secretsmanager.GetSecretValueOutput{}
+
+ credentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true)
+ asmClientCreator.EXPECT().NewASMClient(region1, iamRoleCreds).Return(mockASMClient)
+ asmClientCreator.EXPECT().NewASMClient(region2, iamRoleCreds).Return(mockASMClient)
+ mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Do(func(in *secretsmanager.GetSecretValueInput) {
+ assert.Equal(t, aws.StringValue(in.SecretId), valueFrom1)
+ }).Return(asmSecretValue, errors.New("error response")).Times(2)
+
+ asmRes := &ASMSecretResource{
+ executionCredentialsID: executionCredentialsID,
+ requiredSecrets: requiredSecretData,
+ credentialsManager: credentialsManager,
+ asmClientCreator: asmClientCreator,
+ }
+
+ assert.Error(t, asmRes.Create())
+ expectedError1 := fmt.Sprintf("fetching secret data from AWS Secrets Manager in region %s: secret %s: error response", region1, valueFrom1)
+ expectedError2 := fmt.Sprintf("fetching secret data from AWS Secrets Manager in region %s: secret %s: error response", region2, valueFrom1)
+ assert.Contains(t, asmRes.GetTerminalReason(), expectedError1)
+ assert.Contains(t, asmRes.GetTerminalReason(), expectedError2)
+}
+
+func TestCreateReturnError(t *testing.T) {
+ requiredSecretData := map[string]apicontainer.Secret{
+ secretKeyWest1: {
+ Name: secretName1,
+ ValueFrom: valueFrom1,
+ Region: region1,
+ Provider: "asm",
+ },
+ }
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ credentialsManager := mock_credentials.NewMockManager(ctrl)
+ asmClientCreator := mock_factory.NewMockClientCreator(ctrl)
+ mockASMClient := mock_secretsmanageriface.NewMockSecretsManagerAPI(ctrl)
+
+ iamRoleCreds := credentials.IAMRoleCredentials{}
+ creds := credentials.TaskIAMRoleCredentials{
+ IAMRoleCredentials: iamRoleCreds,
+ }
+
+ asmSecretValue := &secretsmanager.GetSecretValueOutput{}
+
+ gomock.InOrder(
+ credentialsManager.EXPECT().GetTaskCredentials(executionCredentialsID).Return(creds, true),
+ asmClientCreator.EXPECT().NewASMClient(region1, iamRoleCreds).Return(mockASMClient),
+ mockASMClient.EXPECT().GetSecretValue(gomock.Any()).Do(func(in *secretsmanager.GetSecretValueInput) {
+ assert.Equal(t, aws.StringValue(in.SecretId), valueFrom1)
+ }).Return(asmSecretValue, errors.New("error response")),
+ )
+ asmRes := &ASMSecretResource{
+ executionCredentialsID: executionCredentialsID,
+ requiredSecrets: requiredSecretData,
+ credentialsManager: credentialsManager,
+ asmClientCreator: asmClientCreator,
+ }
+
+ assert.Error(t, asmRes.Create())
+ expectedError := fmt.Sprintf("fetching secret data from AWS Secrets Manager in region %s: secret %s: error response", region1, valueFrom1)
+ assert.Equal(t, expectedError, asmRes.GetTerminalReason())
+}
+
+func TestMarshalUnmarshalJSON(t *testing.T) {
+ requiredSecretData := map[string]apicontainer.Secret{
+ secretKeyWest1: {
+ Name: secretName1,
+ ValueFrom: valueFrom1,
+ Region: region1,
+ Provider: "asm",
+ },
+ }
+
+ asmResIn := &ASMSecretResource{
+ taskARN: taskARN,
+ executionCredentialsID: executionCredentialsID,
+ createdAt: time.Now(),
+ knownStatusUnsafe: resourcestatus.ResourceCreated,
+ desiredStatusUnsafe: resourcestatus.ResourceCreated,
+ requiredSecrets: requiredSecretData,
+ }
+
+ bytes, err := json.Marshal(asmResIn)
+ require.NoError(t, err)
+
+ asmResOut := &ASMSecretResource{}
+ err = json.Unmarshal(bytes, asmResOut)
+ require.NoError(t, err)
+ assert.Equal(t, asmResIn.taskARN, asmResOut.taskARN)
+ assert.WithinDuration(t, asmResIn.createdAt, asmResOut.createdAt, time.Microsecond)
+ assert.Equal(t, asmResIn.desiredStatusUnsafe, asmResOut.desiredStatusUnsafe)
+ assert.Equal(t, asmResIn.knownStatusUnsafe, asmResOut.knownStatusUnsafe)
+ assert.Equal(t, asmResIn.executionCredentialsID, asmResOut.executionCredentialsID)
+ assert.Equal(t, len(asmResIn.requiredSecrets), len(asmResOut.requiredSecrets))
+ assert.Equal(t, asmResIn.requiredSecrets[secretKeyWest1], asmResOut.requiredSecrets[secretKeyWest1])
+}
+
+func TestInitialize(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ credentialsManager := mock_credentials.NewMockManager(ctrl)
+ asmClientCreator := mock_factory.NewMockClientCreator(ctrl)
+ asmRes := &ASMSecretResource{
+ knownStatusUnsafe: resourcestatus.ResourceCreated,
+ desiredStatusUnsafe: resourcestatus.ResourceCreated,
+ }
+ asmRes.Initialize(&taskresource.ResourceFields{
+ ResourceFieldsCommon: &taskresource.ResourceFieldsCommon{
+ ASMClientCreator: asmClientCreator,
+ CredentialsManager: credentialsManager,
+ },
+ }, apitaskstatus.TaskStatusNone, apitaskstatus.TaskRunning)
+ assert.Equal(t, resourcestatus.ResourceStatusNone, asmRes.GetKnownStatus())
+ assert.Equal(t, resourcestatus.ResourceCreated, asmRes.GetDesiredStatus())
+}
+
+func TestClearASMSecretValue(t *testing.T) {
+ secretValues := map[string]string{
+ "db_name": "db_value",
+ "secret_name": "secret_value",
+ }
+
+ asmRes := &ASMSecretResource{
+ secretData: secretValues,
+ }
+ asmRes.clearASMSecretValue()
+ assert.Equal(t, 0, len(asmRes.secretData))
+}
diff --git a/agent/taskresource/asmsecret/asmsecretstatus.go b/agent/taskresource/asmsecret/asmsecretstatus.go
new file mode 100644
index 00000000000..af0ba60f5cc
--- /dev/null
+++ b/agent/taskresource/asmsecret/asmsecretstatus.go
@@ -0,0 +1,78 @@
+// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package asmsecret
+
+import (
+ "errors"
+ "strings"
+
+ resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status"
+)
+
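+// ASMSecretStatus defines the resource status of the asmsecret task resource.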
+type ASMSecretStatus resourcestatus.ResourceStatus
+
+const (
+	// ASMSecretStatusNone is the zero state of a task resource
+	ASMSecretStatusNone ASMSecretStatus = iota
+	// ASMSecretCreated represents a task resource which has been created
+	ASMSecretCreated
+	// ASMSecretRemoved represents a task resource which has been cleaned up
+	ASMSecretRemoved
+)
+
+var asmSecretStatusMap = map[string]ASMSecretStatus{
+ "NONE": ASMSecretStatusNone,
+ "CREATED": ASMSecretCreated,
+ "REMOVED": ASMSecretRemoved,
+}
+
+// String returns a human readable string representation of this object
+func (as ASMSecretStatus) String() string {
+ for k, v := range asmSecretStatusMap {
+ if v == as {
+ return k
+ }
+ }
+ return "NONE"
+}
+
+// MarshalJSON overrides the logic for JSON-encoding the ResourceStatus type
+func (as *ASMSecretStatus) MarshalJSON() ([]byte, error) {
+ if as == nil {
+ return nil, errors.New("asmsecret resource status is nil")
+ }
+ return []byte(`"` + as.String() + `"`), nil
+}
+
+// UnmarshalJSON overrides the logic for parsing the JSON-encoded ResourceStatus data
+func (as *ASMSecretStatus) UnmarshalJSON(b []byte) error {
+ if strings.ToLower(string(b)) == "null" {
+ *as = ASMSecretStatusNone
+ return nil
+ }
+
+ if b[0] != '"' || b[len(b)-1] != '"' {
+ *as = ASMSecretStatusNone
+ return errors.New("resource status unmarshal: status must be a string or null; Got " + string(b))
+ }
+
+ strStatus := string(b[1 : len(b)-1])
+ stat, ok := asmSecretStatusMap[strStatus]
+ if !ok {
+ *as = ASMSecretStatusNone
+ return errors.New("resource status unmarshal: unrecognized status")
+ }
+ *as = stat
+ return nil
+}
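Because the status round-trips through the agent state file, a short sketch of the expected encoding and decoding (assuming the package imports as written above):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/aws/amazon-ecs-agent/agent/taskresource/asmsecret"
)

func main() {
	s := asmsecret.ASMSecretCreated
	b, err := json.Marshal(&s) // MarshalJSON is defined on the pointer receiver
	if err == nil {
		fmt.Println(string(b)) // "CREATED"
	}

	var parsed asmsecret.ASMSecretStatus
	if err := json.Unmarshal([]byte(`"REMOVED"`), &parsed); err == nil {
		fmt.Println(parsed) // REMOVED
	}
}
```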
diff --git a/agent/taskresource/asmsecret/asmsecretstatus_test.go b/agent/taskresource/asmsecret/asmsecretstatus_test.go
new file mode 100644
index 00000000000..442865fd51f
--- /dev/null
+++ b/agent/taskresource/asmsecret/asmsecretstatus_test.go
@@ -0,0 +1,157 @@
+// +build unit
+
+// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package asmsecret
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestStatusString(t *testing.T) {
+ cases := []struct {
+ Name string
+ InASMSecretStatus ASMSecretStatus
+ OutASMSecretStatus string
+ }{
+ {
+ Name: "ToStringASMSecretStatusNone",
+ InASMSecretStatus: ASMSecretStatusNone,
+ OutASMSecretStatus: "NONE",
+ },
+ {
+ Name: "ToStringASMSecretCreated",
+ InASMSecretStatus: ASMSecretCreated,
+ OutASMSecretStatus: "CREATED",
+ },
+ {
+ Name: "ToStringASMSecretRemoved",
+ InASMSecretStatus: ASMSecretRemoved,
+ OutASMSecretStatus: "REMOVED",
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.Name, func(t *testing.T) {
+ assert.Equal(t, c.OutASMSecretStatus, c.InASMSecretStatus.String())
+ })
+ }
+}
+
+func TestMarshalNilASMSecretStatus(t *testing.T) {
+ var status *ASMSecretStatus
+ bytes, err := status.MarshalJSON()
+
+ assert.Nil(t, bytes)
+ assert.Error(t, err)
+}
+
+func TestMarshalASMSecretStatus(t *testing.T) {
+ cases := []struct {
+ Name string
+ InASMSecretStatus ASMSecretStatus
+ OutASMSecretStatus string
+ }{
+ {
+ Name: "MarshallASMSecretStatusNone",
+ InASMSecretStatus: ASMSecretStatusNone,
+ OutASMSecretStatus: "\"NONE\"",
+ },
+ {
+ Name: "MarshallASMSecretCreated",
+ InASMSecretStatus: ASMSecretCreated,
+ OutASMSecretStatus: "\"CREATED\"",
+ },
+ {
+ Name: "MarshallASMSecretRemoved",
+ InASMSecretStatus: ASMSecretRemoved,
+ OutASMSecretStatus: "\"REMOVED\"",
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.Name, func(t *testing.T) {
+ bytes, err := c.InASMSecretStatus.MarshalJSON()
+
+ assert.NoError(t, err)
+ assert.Equal(t, c.OutASMSecretStatus, string(bytes[:]))
+ })
+ }
+}
+
+func TestUnmarshalASMSecretStatus(t *testing.T) {
+ cases := []struct {
+ Name string
+ InASMSecretStatus string
+ OutASMSecretStatus ASMSecretStatus
+ ShouldError bool
+ }{
+ {
+ Name: "UnmarshallASMSecretStatusNone",
+ InASMSecretStatus: "\"NONE\"",
+ OutASMSecretStatus: ASMSecretStatusNone,
+ ShouldError: false,
+ },
+ {
+ Name: "UnmarshallASMSecretCreated",
+ InASMSecretStatus: "\"CREATED\"",
+ OutASMSecretStatus: ASMSecretCreated,
+ ShouldError: false,
+ },
+ {
+ Name: "UnmarshallASMSecretRemoved",
+ InASMSecretStatus: "\"REMOVED\"",
+ OutASMSecretStatus: ASMSecretRemoved,
+ ShouldError: false,
+ },
+ {
+ Name: "UnmarshallASMSecretStatusNull",
+ InASMSecretStatus: "null",
+ OutASMSecretStatus: ASMSecretStatusNone,
+ ShouldError: false,
+ },
+ {
+ Name: "UnmarshallASMSecretStatusNonString",
+ InASMSecretStatus: "1",
+ OutASMSecretStatus: ASMSecretStatusNone,
+ ShouldError: true,
+ },
+ {
+ Name: "UnmarshallASMSecretStatusUnmappedStatus",
+ InASMSecretStatus: "\"LOL\"",
+ OutASMSecretStatus: ASMSecretStatusNone,
+ ShouldError: true,
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.Name, func(t *testing.T) {
+
+ var status ASMSecretStatus
+ err := json.Unmarshal([]byte(c.InASMSecretStatus), &status)
+
+ if c.ShouldError {
+ assert.Error(t, err)
+ } else {
+
+ assert.NoError(t, err)
+ assert.Equal(t, c.OutASMSecretStatus, status)
+ }
+ })
+ }
+}
diff --git a/agent/taskresource/ssmsecret/ssmsecret.go b/agent/taskresource/ssmsecret/ssmsecret.go
index 1a92c272ec5..aa158744b47 100644
--- a/agent/taskresource/ssmsecret/ssmsecret.go
+++ b/agent/taskresource/ssmsecret/ssmsecret.go
@@ -307,7 +307,7 @@ func (secret *SSMSecretResource) retrieveSSMSecretValuesByRegion(region string,
var secretNames []string
for _, s := range secrets {
- secretKey := s.GetSSMSecretResourceCacheKey()
+ secretKey := s.GetSecretResourceCacheKey()
if _, ok := secret.GetCachedSecretValue(secretKey); ok {
continue
}
@@ -392,6 +392,18 @@ func (secret *SSMSecretResource) GetCachedSecretValue(secretKey string) (string,
return s, ok
}
+// SetCachedSecretValue set the secret value in the secretData field given the key and value
+func (secret *SSMSecretResource) SetCachedSecretValue(secretKey string, secretValue string) {
+ secret.lock.Lock()
+ defer secret.lock.Unlock()
+
+ if secret.secretData == nil {
+ secret.secretData = make(map[string]string)
+ }
+
+ secret.secretData[secretKey] = secretValue
+}
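+
+// A usage sketch: SetCachedSecretValue can seed the cache so that
+// retrieveSSMSecretValuesByRegion skips keys that were already fetched:
+//
+//	res.SetCachedSecretValue("/db/password_us-west-2", value)
+//	v, ok := res.GetCachedSecretValue("/db/password_us-west-2")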
+
func (secret *SSMSecretResource) Initialize(resourceFields *taskresource.ResourceFields,
taskKnownStatus status.TaskStatus,
taskDesiredStatus status.TaskStatus) {
diff --git a/agent/taskresource/types/types.go b/agent/taskresource/types/types.go
index aac5e69a527..4af7c61538f 100644
--- a/agent/taskresource/types/types.go
+++ b/agent/taskresource/types/types.go
@@ -19,6 +19,7 @@ import (
"github.com/aws/amazon-ecs-agent/agent/taskresource"
asmauthres "github.com/aws/amazon-ecs-agent/agent/taskresource/asmauth"
+ asmsecretres "github.com/aws/amazon-ecs-agent/agent/taskresource/asmsecret"
cgroupres "github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup"
ssmsecretres "github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret"
"github.com/aws/amazon-ecs-agent/agent/taskresource/volume"
@@ -33,6 +34,8 @@ const (
ASMAuthKey = asmauthres.ResourceName
// SSMSecretKey is the string used in resources map to represent ssm secret
SSMSecretKey = ssmsecretres.ResourceName
+ // ASMSecretKey is the string used in resources map to represent asm secret
+ ASMSecretKey = asmsecretres.ResourceName
)
// ResourcesMap represents the map of resource type to the corresponding resource
@@ -65,6 +68,10 @@ func (rm *ResourcesMap) UnmarshalJSON(data []byte) error {
if unmarshalSSMSecretKey(key, value, result) != nil {
return err
}
+		case ASMSecretKey:
+			if err := unmarshalASMSecretKey(key, value, result); err != nil {
+				return err
+			}
default:
return errors.New("Unsupported resource type")
}
@@ -142,3 +149,21 @@ func unmarshalSSMSecretKey(key string, value json.RawMessage, result map[string]
}
return nil
}
+
+func unmarshalASMSecretKey(key string, value json.RawMessage, result map[string][]taskresource.TaskResource) error {
+ var asmsecrets []json.RawMessage
+ err := json.Unmarshal(value, &asmsecrets)
+ if err != nil {
+ return err
+ }
+
+ for _, secret := range asmsecrets {
+ res := &asmsecretres.ASMSecretResource{}
+ err := res.UnmarshalJSON(secret)
+ if err != nil {
+ return err
+ }
+ result[key] = append(result[key], res)
+ }
+ return nil
+}
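A minimal sketch of the new asmsecret case in action, deserializing a saved resources map whose field names follow the ASMSecretResourceJSON tags:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/aws/amazon-ecs-agent/agent/taskresource/types"
)

func main() {
	raw := []byte(`{"asmsecret":[{"taskARN":"task1","desiredStatus":"CREATED","knownStatus":"CREATED"}]}`)

	m := make(types.ResourcesMap)
	if err := json.Unmarshal(raw, &m); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	res := m["asmsecret"][0]
	fmt.Println(res.GetName())                           // asmsecret
	fmt.Println(res.StatusString(res.GetKnownStatus())) // CREATED
}
```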
diff --git a/agent/taskresource/types/types_test.go b/agent/taskresource/types/types_test.go
index a749cdf2bf4..2aa58eb77f4 100644
--- a/agent/taskresource/types/types_test.go
+++ b/agent/taskresource/types/types_test.go
@@ -18,15 +18,19 @@ package types
import (
"encoding/json"
"testing"
- "time"
"github.com/aws/amazon-ecs-agent/agent/taskresource"
+ "github.com/aws/amazon-ecs-agent/agent/taskresource/asmsecret"
+ "github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret"
resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status"
"github.com/aws/amazon-ecs-agent/agent/taskresource/volume"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret"
+)
+
+const (
+ secretKeyWest1 = "/test/secretName_us-west-2"
+ asmSecretKeyWest1 = "arn:aws:secretsmanager:us-west-2:11111:secret:/test/secretName_us-west-2"
)
func TestMarshalUnmarshalVolumeResource(t *testing.T) {
@@ -60,17 +64,45 @@ func TestMarshalUnmarshalVolumeResource(t *testing.T) {
assert.Equal(t, unMarshalledVolumes[0].GetKnownStatus(), resourcestatus.ResourceStatusNone)
}
-
func TestMarshalUnmarshalSSMSecretResource(t *testing.T) {
- bytes := []byte(`{"ssmsecret":[{"TaskARN":"task_arn","RequiredSecrets":{"us-west-2":[]},"CreatedAt":"0001-01-01T00:00:00Z","DesiredStatus":"CREATED","KnownStatus":"REMOVED"}]}`)
+ resources := make(map[string][]taskresource.TaskResource)
+ ssmSecrets := []taskresource.TaskResource{
+ &ssmsecret.SSMSecretResource{},
+ }
+
+ ssmSecrets[0].SetDesiredStatus(resourcestatus.ResourceCreated)
+ ssmSecrets[0].SetKnownStatus(resourcestatus.ResourceStatusNone)
+
+ resources["ssmsecret"] = ssmSecrets
+ data, err := json.Marshal(resources)
+ require.NoError(t, err)
- unmarshalledMap := make(ResourcesMap)
- err := unmarshalledMap.UnmarshalJSON(bytes)
+ var unMarshalledResource ResourcesMap
+ err = json.Unmarshal(data, &unMarshalledResource)
assert.NoError(t, err)
+ unMarshalledSSMSecret, ok := unMarshalledResource["ssmsecret"]
+ assert.True(t, ok)
+ assert.Equal(t, unMarshalledSSMSecret[0].GetDesiredStatus(), resourcestatus.ResourceCreated)
+ assert.Equal(t, unMarshalledSSMSecret[0].GetKnownStatus(), resourcestatus.ResourceStatusNone)
+}
+
+func TestMarshalUnmarshalASMSecretResource(t *testing.T) {
+ resources := make(map[string][]taskresource.TaskResource)
+ asmSecrets := []taskresource.TaskResource{
+ &asmsecret.ASMSecretResource{},
+ }
+ asmSecrets[0].SetDesiredStatus(resourcestatus.ResourceCreated)
+ asmSecrets[0].SetKnownStatus(resourcestatus.ResourceStatusNone)
- ssmRes := unmarshalledMap["ssmsecret"][0].(*ssmsecret.SSMSecretResource)
- assert.Equal(t, "ssmsecret", ssmRes.GetName())
- assert.Equal(t, time.Time{}, ssmRes.GetCreatedAt())
- assert.Equal(t, resourcestatus.ResourceCreated, ssmRes.GetDesiredStatus())
- assert.Equal(t, resourcestatus.ResourceRemoved, ssmRes.GetKnownStatus())
+ resources["asmsecret"] = asmSecrets
+ data, err := json.Marshal(resources)
+ require.NoError(t, err)
+
+ var unMarshalledResource ResourcesMap
+ err = json.Unmarshal(data, &unMarshalledResource)
+ assert.NoError(t, err)
+ unMarshalledASMSecret, ok := unMarshalledResource["asmsecret"]
+ assert.True(t, ok)
+ assert.Equal(t, unMarshalledASMSecret[0].GetDesiredStatus(), resourcestatus.ResourceCreated)
+ assert.Equal(t, unMarshalledASMSecret[0].GetKnownStatus(), resourcestatus.ResourceStatusNone)
}
diff --git a/agent/taskresource/types_common.go b/agent/taskresource/types_common.go
index 3dcea48a8fe..c9fdcad8be4 100644
--- a/agent/taskresource/types_common.go
+++ b/agent/taskresource/types_common.go
@@ -14,10 +14,10 @@
package taskresource
import (
- "github.com/aws/amazon-ecs-agent/agent/credentials"
- "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper"
asmfactory "github.com/aws/amazon-ecs-agent/agent/asm/factory"
+ "github.com/aws/amazon-ecs-agent/agent/credentials"
ssmfactory "github.com/aws/amazon-ecs-agent/agent/ssm/factory"
+ "github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper"
)
type ResourceFieldsCommon struct {
diff --git a/agent/utils/mobypkgwrapper/mocks/pluginswrapper_mocks.go b/agent/utils/mobypkgwrapper/mocks/pluginswrapper_mocks.go
index bff3c799da4..c28c7c17335 100644
--- a/agent/utils/mobypkgwrapper/mocks/pluginswrapper_mocks.go
+++ b/agent/utils/mobypkgwrapper/mocks/pluginswrapper_mocks.go
@@ -11,43 +11,50 @@
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
-// Automatically generated by MockGen. DO NOT EDIT!
-// Source: github.com/aws/amazon-ecs-agent/agent/utils/ioutilwrapper (interfaces: IOUtil)
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/aws/amazon-ecs-agent/agent/utils/mobypkgwrapper (interfaces: Plugins)
+// Package mock_mobypkgwrapper is a generated GoMock package.
package mock_mobypkgwrapper
import (
+ reflect "reflect"
+
gomock "github.com/golang/mock/gomock"
)
-// Mock of Plugins interface
+// MockPlugins is a mock of Plugins interface
type MockPlugins struct {
ctrl *gomock.Controller
- recorder *_MockPluginsRecorder
+ recorder *MockPluginsMockRecorder
}
-// Recorder for MockPlugins (not exported)
-type _MockPluginsRecorder struct {
+// MockPluginsMockRecorder is the mock recorder for MockPlugins
+type MockPluginsMockRecorder struct {
mock *MockPlugins
}
+// NewMockPlugins creates a new mock instance
func NewMockPlugins(ctrl *gomock.Controller) *MockPlugins {
mock := &MockPlugins{ctrl: ctrl}
- mock.recorder = &_MockPluginsRecorder{mock}
+ mock.recorder = &MockPluginsMockRecorder{mock}
return mock
}
-func (_m *MockPlugins) EXPECT() *_MockPluginsRecorder {
- return _m.recorder
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockPlugins) EXPECT() *MockPluginsMockRecorder {
+ return m.recorder
}
-func (_m *MockPlugins) Scan() ([]string, error) {
- ret := _m.ctrl.Call(_m, "Scan")
+// Scan mocks base method
+func (m *MockPlugins) Scan() ([]string, error) {
+ ret := m.ctrl.Call(m, "Scan")
ret0, _ := ret[0].([]string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
-func (_mr *_MockPluginsRecorder) Scan() *gomock.Call {
- return _mr.mock.ctrl.RecordCall(_mr.mock, "Scan")
+// Scan indicates an expected call of Scan
+func (mr *MockPluginsMockRecorder) Scan() *gomock.Call {
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scan", reflect.TypeOf((*MockPlugins)(nil).Scan))
}
diff --git a/agent/vendor/github.com/Nvveen/Gotty/LICENSE b/agent/vendor/github.com/Nvveen/Gotty/LICENSE
deleted file mode 100644
index 0b71c97360e..00000000000
--- a/agent/vendor/github.com/Nvveen/Gotty/LICENSE
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com)
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-The views and conclusions contained in the software and documentation are those
-of the authors and should not be interpreted as representing official policies,
-either expressed or implied, of the FreeBSD Project.
diff --git a/agent/vendor/github.com/Nvveen/Gotty/README b/agent/vendor/github.com/Nvveen/Gotty/README
deleted file mode 100644
index a6b0d9a8fe2..00000000000
--- a/agent/vendor/github.com/Nvveen/Gotty/README
+++ /dev/null
@@ -1,5 +0,0 @@
-Gotty is a library written in Go that determines and reads termcap database
-files to produce an interface for interacting with the capabilities of a
-terminal.
-See the godoc documentation or the source code for more information about
-function usage.
diff --git a/agent/vendor/github.com/Nvveen/Gotty/TODO b/agent/vendor/github.com/Nvveen/Gotty/TODO
deleted file mode 100644
index 470460531ca..00000000000
--- a/agent/vendor/github.com/Nvveen/Gotty/TODO
+++ /dev/null
@@ -1,3 +0,0 @@
-gotty.go:// TODO add more concurrency to name lookup, look for more opportunities.
-all:// TODO add more documentation, with function usage in a doc.go file.
-all:// TODO add more testing/benchmarking with go test.
diff --git a/agent/vendor/github.com/Nvveen/Gotty/attributes.go b/agent/vendor/github.com/Nvveen/Gotty/attributes.go
deleted file mode 100644
index a4c005fae58..00000000000
--- a/agent/vendor/github.com/Nvveen/Gotty/attributes.go
+++ /dev/null
@@ -1,514 +0,0 @@
-// Copyright 2012 Neal van Veen. All rights reserved.
-// Usage of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package gotty
-
-// Boolean capabilities
-var BoolAttr = [...]string{
- "auto_left_margin", "bw",
- "auto_right_margin", "am",
- "no_esc_ctlc", "xsb",
- "ceol_standout_glitch", "xhp",
- "eat_newline_glitch", "xenl",
- "erase_overstrike", "eo",
- "generic_type", "gn",
- "hard_copy", "hc",
- "has_meta_key", "km",
- "has_status_line", "hs",
- "insert_null_glitch", "in",
- "memory_above", "da",
- "memory_below", "db",
- "move_insert_mode", "mir",
- "move_standout_mode", "msgr",
- "over_strike", "os",
- "status_line_esc_ok", "eslok",
- "dest_tabs_magic_smso", "xt",
- "tilde_glitch", "hz",
- "transparent_underline", "ul",
- "xon_xoff", "nxon",
- "needs_xon_xoff", "nxon",
- "prtr_silent", "mc5i",
- "hard_cursor", "chts",
- "non_rev_rmcup", "nrrmc",
- "no_pad_char", "npc",
- "non_dest_scroll_region", "ndscr",
- "can_change", "ccc",
- "back_color_erase", "bce",
- "hue_lightness_saturation", "hls",
- "col_addr_glitch", "xhpa",
- "cr_cancels_micro_mode", "crxm",
- "has_print_wheel", "daisy",
- "row_addr_glitch", "xvpa",
- "semi_auto_right_margin", "sam",
- "cpi_changes_res", "cpix",
- "lpi_changes_res", "lpix",
- "backspaces_with_bs", "",
- "crt_no_scrolling", "",
- "no_correctly_working_cr", "",
- "gnu_has_meta_key", "",
- "linefeed_is_newline", "",
- "has_hardware_tabs", "",
- "return_does_clr_eol", "",
-}
-
-// Numerical capabilities
-var NumAttr = [...]string{
- "columns", "cols",
- "init_tabs", "it",
- "lines", "lines",
- "lines_of_memory", "lm",
- "magic_cookie_glitch", "xmc",
- "padding_baud_rate", "pb",
- "virtual_terminal", "vt",
- "width_status_line", "wsl",
- "num_labels", "nlab",
- "label_height", "lh",
- "label_width", "lw",
- "max_attributes", "ma",
- "maximum_windows", "wnum",
- "max_colors", "colors",
- "max_pairs", "pairs",
- "no_color_video", "ncv",
- "buffer_capacity", "bufsz",
- "dot_vert_spacing", "spinv",
- "dot_horz_spacing", "spinh",
- "max_micro_address", "maddr",
- "max_micro_jump", "mjump",
- "micro_col_size", "mcs",
- "micro_line_size", "mls",
- "number_of_pins", "npins",
- "output_res_char", "orc",
- "output_res_line", "orl",
- "output_res_horz_inch", "orhi",
- "output_res_vert_inch", "orvi",
- "print_rate", "cps",
- "wide_char_size", "widcs",
- "buttons", "btns",
- "bit_image_entwining", "bitwin",
- "bit_image_type", "bitype",
- "magic_cookie_glitch_ul", "",
- "carriage_return_delay", "",
- "new_line_delay", "",
- "backspace_delay", "",
- "horizontal_tab_delay", "",
- "number_of_function_keys", "",
-}
-
-// String capabilities
-var StrAttr = [...]string{
- "back_tab", "cbt",
- "bell", "bel",
- "carriage_return", "cr",
- "change_scroll_region", "csr",
- "clear_all_tabs", "tbc",
- "clear_screen", "clear",
- "clr_eol", "el",
- "clr_eos", "ed",
- "column_address", "hpa",
- "command_character", "cmdch",
- "cursor_address", "cup",
- "cursor_down", "cud1",
- "cursor_home", "home",
- "cursor_invisible", "civis",
- "cursor_left", "cub1",
- "cursor_mem_address", "mrcup",
- "cursor_normal", "cnorm",
- "cursor_right", "cuf1",
- "cursor_to_ll", "ll",
- "cursor_up", "cuu1",
- "cursor_visible", "cvvis",
- "delete_character", "dch1",
- "delete_line", "dl1",
- "dis_status_line", "dsl",
- "down_half_line", "hd",
- "enter_alt_charset_mode", "smacs",
- "enter_blink_mode", "blink",
- "enter_bold_mode", "bold",
- "enter_ca_mode", "smcup",
- "enter_delete_mode", "smdc",
- "enter_dim_mode", "dim",
- "enter_insert_mode", "smir",
- "enter_secure_mode", "invis",
- "enter_protected_mode", "prot",
- "enter_reverse_mode", "rev",
- "enter_standout_mode", "smso",
- "enter_underline_mode", "smul",
- "erase_chars", "ech",
- "exit_alt_charset_mode", "rmacs",
- "exit_attribute_mode", "sgr0",
- "exit_ca_mode", "rmcup",
- "exit_delete_mode", "rmdc",
- "exit_insert_mode", "rmir",
- "exit_standout_mode", "rmso",
- "exit_underline_mode", "rmul",
- "flash_screen", "flash",
- "form_feed", "ff",
- "from_status_line", "fsl",
- "init_1string", "is1",
- "init_2string", "is2",
- "init_3string", "is3",
- "init_file", "if",
- "insert_character", "ich1",
- "insert_line", "il1",
- "insert_padding", "ip",
- "key_backspace", "kbs",
- "key_catab", "ktbc",
- "key_clear", "kclr",
- "key_ctab", "kctab",
- "key_dc", "kdch1",
- "key_dl", "kdl1",
- "key_down", "kcud1",
- "key_eic", "krmir",
- "key_eol", "kel",
- "key_eos", "ked",
- "key_f0", "kf0",
- "key_f1", "kf1",
- "key_f10", "kf10",
- "key_f2", "kf2",
- "key_f3", "kf3",
- "key_f4", "kf4",
- "key_f5", "kf5",
- "key_f6", "kf6",
- "key_f7", "kf7",
- "key_f8", "kf8",
- "key_f9", "kf9",
- "key_home", "khome",
- "key_ic", "kich1",
- "key_il", "kil1",
- "key_left", "kcub1",
- "key_ll", "kll",
- "key_npage", "knp",
- "key_ppage", "kpp",
- "key_right", "kcuf1",
- "key_sf", "kind",
- "key_sr", "kri",
- "key_stab", "khts",
- "key_up", "kcuu1",
- "keypad_local", "rmkx",
- "keypad_xmit", "smkx",
- "lab_f0", "lf0",
- "lab_f1", "lf1",
- "lab_f10", "lf10",
- "lab_f2", "lf2",
- "lab_f3", "lf3",
- "lab_f4", "lf4",
- "lab_f5", "lf5",
- "lab_f6", "lf6",
- "lab_f7", "lf7",
- "lab_f8", "lf8",
- "lab_f9", "lf9",
- "meta_off", "rmm",
- "meta_on", "smm",
- "newline", "_glitch",
- "pad_char", "npc",
- "parm_dch", "dch",
- "parm_delete_line", "dl",
- "parm_down_cursor", "cud",
- "parm_ich", "ich",
- "parm_index", "indn",
- "parm_insert_line", "il",
- "parm_left_cursor", "cub",
- "parm_right_cursor", "cuf",
- "parm_rindex", "rin",
- "parm_up_cursor", "cuu",
- "pkey_key", "pfkey",
- "pkey_local", "pfloc",
- "pkey_xmit", "pfx",
- "print_screen", "mc0",
- "prtr_off", "mc4",
- "prtr_on", "mc5",
- "repeat_char", "rep",
- "reset_1string", "rs1",
- "reset_2string", "rs2",
- "reset_3string", "rs3",
- "reset_file", "rf",
- "restore_cursor", "rc",
- "row_address", "mvpa",
- "save_cursor", "row_address",
- "scroll_forward", "ind",
- "scroll_reverse", "ri",
- "set_attributes", "sgr",
- "set_tab", "hts",
- "set_window", "wind",
- "tab", "s_magic_smso",
- "to_status_line", "tsl",
- "underline_char", "uc",
- "up_half_line", "hu",
- "init_prog", "iprog",
- "key_a1", "ka1",
- "key_a3", "ka3",
- "key_b2", "kb2",
- "key_c1", "kc1",
- "key_c3", "kc3",
- "prtr_non", "mc5p",
- "char_padding", "rmp",
- "acs_chars", "acsc",
- "plab_norm", "pln",
- "key_btab", "kcbt",
- "enter_xon_mode", "smxon",
- "exit_xon_mode", "rmxon",
- "enter_am_mode", "smam",
- "exit_am_mode", "rmam",
- "xon_character", "xonc",
- "xoff_character", "xoffc",
- "ena_acs", "enacs",
- "label_on", "smln",
- "label_off", "rmln",
- "key_beg", "kbeg",
- "key_cancel", "kcan",
- "key_close", "kclo",
- "key_command", "kcmd",
- "key_copy", "kcpy",
- "key_create", "kcrt",
- "key_end", "kend",
- "key_enter", "kent",
- "key_exit", "kext",
- "key_find", "kfnd",
- "key_help", "khlp",
- "key_mark", "kmrk",
- "key_message", "kmsg",
- "key_move", "kmov",
- "key_next", "knxt",
- "key_open", "kopn",
- "key_options", "kopt",
- "key_previous", "kprv",
- "key_print", "kprt",
- "key_redo", "krdo",
- "key_reference", "kref",
- "key_refresh", "krfr",
- "key_replace", "krpl",
- "key_restart", "krst",
- "key_resume", "kres",
- "key_save", "ksav",
- "key_suspend", "kspd",
- "key_undo", "kund",
- "key_sbeg", "kBEG",
- "key_scancel", "kCAN",
- "key_scommand", "kCMD",
- "key_scopy", "kCPY",
- "key_screate", "kCRT",
- "key_sdc", "kDC",
- "key_sdl", "kDL",
- "key_select", "kslt",
- "key_send", "kEND",
- "key_seol", "kEOL",
- "key_sexit", "kEXT",
- "key_sfind", "kFND",
- "key_shelp", "kHLP",
- "key_shome", "kHOM",
- "key_sic", "kIC",
- "key_sleft", "kLFT",
- "key_smessage", "kMSG",
- "key_smove", "kMOV",
- "key_snext", "kNXT",
- "key_soptions", "kOPT",
- "key_sprevious", "kPRV",
- "key_sprint", "kPRT",
- "key_sredo", "kRDO",
- "key_sreplace", "kRPL",
- "key_sright", "kRIT",
- "key_srsume", "kRES",
- "key_ssave", "kSAV",
- "key_ssuspend", "kSPD",
- "key_sundo", "kUND",
- "req_for_input", "rfi",
- "key_f11", "kf11",
- "key_f12", "kf12",
- "key_f13", "kf13",
- "key_f14", "kf14",
- "key_f15", "kf15",
- "key_f16", "kf16",
- "key_f17", "kf17",
- "key_f18", "kf18",
- "key_f19", "kf19",
- "key_f20", "kf20",
- "key_f21", "kf21",
- "key_f22", "kf22",
- "key_f23", "kf23",
- "key_f24", "kf24",
- "key_f25", "kf25",
- "key_f26", "kf26",
- "key_f27", "kf27",
- "key_f28", "kf28",
- "key_f29", "kf29",
- "key_f30", "kf30",
- "key_f31", "kf31",
- "key_f32", "kf32",
- "key_f33", "kf33",
- "key_f34", "kf34",
- "key_f35", "kf35",
- "key_f36", "kf36",
- "key_f37", "kf37",
- "key_f38", "kf38",
- "key_f39", "kf39",
- "key_f40", "kf40",
- "key_f41", "kf41",
- "key_f42", "kf42",
- "key_f43", "kf43",
- "key_f44", "kf44",
- "key_f45", "kf45",
- "key_f46", "kf46",
- "key_f47", "kf47",
- "key_f48", "kf48",
- "key_f49", "kf49",
- "key_f50", "kf50",
- "key_f51", "kf51",
- "key_f52", "kf52",
- "key_f53", "kf53",
- "key_f54", "kf54",
- "key_f55", "kf55",
- "key_f56", "kf56",
- "key_f57", "kf57",
- "key_f58", "kf58",
- "key_f59", "kf59",
- "key_f60", "kf60",
- "key_f61", "kf61",
- "key_f62", "kf62",
- "key_f63", "kf63",
- "clr_bol", "el1",
- "clear_margins", "mgc",
- "set_left_margin", "smgl",
- "set_right_margin", "smgr",
- "label_format", "fln",
- "set_clock", "sclk",
- "display_clock", "dclk",
- "remove_clock", "rmclk",
- "create_window", "cwin",
- "goto_window", "wingo",
- "hangup", "hup",
- "dial_phone", "dial",
- "quick_dial", "qdial",
- "tone", "tone",
- "pulse", "pulse",
- "flash_hook", "hook",
- "fixed_pause", "pause",
- "wait_tone", "wait",
- "user0", "u0",
- "user1", "u1",
- "user2", "u2",
- "user3", "u3",
- "user4", "u4",
- "user5", "u5",
- "user6", "u6",
- "user7", "u7",
- "user8", "u8",
- "user9", "u9",
- "orig_pair", "op",
- "orig_colors", "oc",
- "initialize_color", "initc",
- "initialize_pair", "initp",
- "set_color_pair", "scp",
- "set_foreground", "setf",
- "set_background", "setb",
- "change_char_pitch", "cpi",
- "change_line_pitch", "lpi",
- "change_res_horz", "chr",
- "change_res_vert", "cvr",
- "define_char", "defc",
- "enter_doublewide_mode", "swidm",
- "enter_draft_quality", "sdrfq",
- "enter_italics_mode", "sitm",
- "enter_leftward_mode", "slm",
- "enter_micro_mode", "smicm",
- "enter_near_letter_quality", "snlq",
- "enter_normal_quality", "snrmq",
- "enter_shadow_mode", "sshm",
- "enter_subscript_mode", "ssubm",
- "enter_superscript_mode", "ssupm",
- "enter_upward_mode", "sum",
- "exit_doublewide_mode", "rwidm",
- "exit_italics_mode", "ritm",
- "exit_leftward_mode", "rlm",
- "exit_micro_mode", "rmicm",
- "exit_shadow_mode", "rshm",
- "exit_subscript_mode", "rsubm",
- "exit_superscript_mode", "rsupm",
- "exit_upward_mode", "rum",
- "micro_column_address", "mhpa",
- "micro_down", "mcud1",
- "micro_left", "mcub1",
- "micro_right", "mcuf1",
- "micro_row_address", "mvpa",
- "micro_up", "mcuu1",
- "order_of_pins", "porder",
- "parm_down_micro", "mcud",
- "parm_left_micro", "mcub",
- "parm_right_micro", "mcuf",
- "parm_up_micro", "mcuu",
- "select_char_set", "scs",
- "set_bottom_margin", "smgb",
- "set_bottom_margin_parm", "smgbp",
- "set_left_margin_parm", "smglp",
- "set_right_margin_parm", "smgrp",
- "set_top_margin", "smgt",
- "set_top_margin_parm", "smgtp",
- "start_bit_image", "sbim",
- "start_char_set_def", "scsd",
- "stop_bit_image", "rbim",
- "stop_char_set_def", "rcsd",
- "subscript_characters", "subcs",
- "superscript_characters", "supcs",
- "these_cause_cr", "docr",
- "zero_motion", "zerom",
- "char_set_names", "csnm",
- "key_mouse", "kmous",
- "mouse_info", "minfo",
- "req_mouse_pos", "reqmp",
- "get_mouse", "getm",
- "set_a_foreground", "setaf",
- "set_a_background", "setab",
- "pkey_plab", "pfxl",
- "device_type", "devt",
- "code_set_init", "csin",
- "set0_des_seq", "s0ds",
- "set1_des_seq", "s1ds",
- "set2_des_seq", "s2ds",
- "set3_des_seq", "s3ds",
- "set_lr_margin", "smglr",
- "set_tb_margin", "smgtb",
- "bit_image_repeat", "birep",
- "bit_image_newline", "binel",
- "bit_image_carriage_return", "bicr",
- "color_names", "colornm",
- "define_bit_image_region", "defbi",
- "end_bit_image_region", "endbi",
- "set_color_band", "setcolor",
- "set_page_length", "slines",
- "display_pc_char", "dispc",
- "enter_pc_charset_mode", "smpch",
- "exit_pc_charset_mode", "rmpch",
- "enter_scancode_mode", "smsc",
- "exit_scancode_mode", "rmsc",
- "pc_term_options", "pctrm",
- "scancode_escape", "scesc",
- "alt_scancode_esc", "scesa",
- "enter_horizontal_hl_mode", "ehhlm",
- "enter_left_hl_mode", "elhlm",
- "enter_low_hl_mode", "elohlm",
- "enter_right_hl_mode", "erhlm",
- "enter_top_hl_mode", "ethlm",
- "enter_vertical_hl_mode", "evhlm",
- "set_a_attributes", "sgr1",
- "set_pglen_inch", "slength",
- "termcap_init2", "",
- "termcap_reset", "",
- "linefeed_if_not_lf", "",
- "backspace_if_not_bs", "",
- "other_non_function_keys", "",
- "arrow_key_map", "",
- "acs_ulcorner", "",
- "acs_llcorner", "",
- "acs_urcorner", "",
- "acs_lrcorner", "",
- "acs_ltee", "",
- "acs_rtee", "",
- "acs_btee", "",
- "acs_ttee", "",
- "acs_hline", "",
- "acs_vline", "",
- "acs_plus", "",
- "memory_lock", "",
- "memory_unlock", "",
- "box_chars_1", "",
-}
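
The name pairs above close out Gotty's string-capability table: each long terminfo name is followed by its short termcap code, and `GetTermcapName` (defined in `gotty.go`, deleted next) scans the Bool/Num/Str tables to translate one into the other. A minimal sketch of that lookup, assuming the package as it existed before this removal:

```go
package main

import (
	"fmt"

	gotty "github.com/Nvveen/Gotty" // removed from the vendor tree in this diff
)

func main() {
	// The tables pair long names with termcap codes, so the lookup
	// below returns "setaf" per the "set_a_foreground" entry above.
	fmt.Println(gotty.GetTermcapName("set_a_foreground"))
}
```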
diff --git a/agent/vendor/github.com/Nvveen/Gotty/gotty.go b/agent/vendor/github.com/Nvveen/Gotty/gotty.go
deleted file mode 100644
index 093cbf37e1b..00000000000
--- a/agent/vendor/github.com/Nvveen/Gotty/gotty.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2012 Neal van Veen. All rights reserved.
-// Usage of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Gotty is a Go-package for reading and parsing the terminfo database
-package gotty
-
-// TODO add more concurrency to name lookup, look for more opportunities.
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
- "os"
- "reflect"
- "strings"
- "sync"
-)
-
-// Open a terminfo file by the name given and construct a TermInfo object.
-// If something went wrong reading the terminfo database file, an error is
-// returned.
-func OpenTermInfo(termName string) (*TermInfo, error) {
- var term *TermInfo
- var err error
- // Find the environment variables
- termloc := os.Getenv("TERMINFO")
- if len(termloc) == 0 {
- // Search like ncurses
- locations := []string{os.Getenv("HOME") + "/.terminfo/", "/etc/terminfo/",
- "/lib/terminfo/", "/usr/share/terminfo/"}
- var path string
- for _, str := range locations {
- // Construct path
- path = str + string(termName[0]) + "/" + termName
- // Check if path can be opened
- file, _ := os.Open(path)
- if file != nil {
- // Path can open, fall out and use current path
- file.Close()
- break
- }
- }
- if len(path) > 0 {
- term, err = readTermInfo(path)
- } else {
- err = errors.New(fmt.Sprintf("No terminfo file(-location) found"))
- }
- }
- return term, err
-}
-
-// Open a terminfo file from the environment variable containing the current
-// terminal name and construct a TermInfo object. If something went wrong
-// reading the terminfo database file, an error is returned.
-func OpenTermInfoEnv() (*TermInfo, error) {
- termenv := os.Getenv("TERM")
- return OpenTermInfo(termenv)
-}
-
-// Return an attribute by the name attr provided. If none can be found,
-// an error is returned.
-func (term *TermInfo) GetAttribute(attr string) (stacker, error) {
- // Channel to store the main value in.
- var value stacker
- // Add a blocking WaitGroup
- var block sync.WaitGroup
- // Keep track of variable being written.
- written := false
- // Function to put into goroutine.
- f := func(ats interface{}) {
- var ok bool
- var v stacker
- // Switch on type of map to use and assign value to it.
- switch reflect.TypeOf(ats).Elem().Kind() {
- case reflect.Bool:
- v, ok = ats.(map[string]bool)[attr]
- case reflect.Int16:
- v, ok = ats.(map[string]int16)[attr]
- case reflect.String:
- v, ok = ats.(map[string]string)[attr]
- }
- // If ok, a value is found, so we can write.
- if ok {
- value = v
- written = true
- }
- // Goroutine is done
- block.Done()
- }
- block.Add(3)
- // Go for all 3 attribute lists.
- go f(term.boolAttributes)
- go f(term.numAttributes)
- go f(term.strAttributes)
- // Wait until every goroutine is done.
- block.Wait()
- // If a value has been written, return it.
- if written {
- return value, nil
- }
- // Otherwise, error.
- return nil, fmt.Errorf("Erorr finding attribute")
-}
-
-// Return an attribute by the name attr provided. If none can be found,
-// an error is returned. A name is first converted to its termcap value.
-func (term *TermInfo) GetAttributeName(name string) (stacker, error) {
- tc := GetTermcapName(name)
- return term.GetAttribute(tc)
-}
-
-// A utility function that finds and returns the termcap equivalent of a
-// variable name.
-func GetTermcapName(name string) string {
- // Termcap name
- var tc string
- // Blocking group
- var wait sync.WaitGroup
- // Function to put into a goroutine
- f := func(attrs []string) {
- // Find the string corresponding to the name
- for i, s := range attrs {
- if s == name {
- tc = attrs[i+1]
- }
- }
- // Goroutine is finished
- wait.Done()
- }
- wait.Add(3)
- // Go for all 3 attribute lists
- go f(BoolAttr[:])
- go f(NumAttr[:])
- go f(StrAttr[:])
- // Wait until every goroutine is done
- wait.Wait()
- // Return the termcap name
- return tc
-}
-
-// This function takes a path to a terminfo file and reads it in binary
-// form to construct the actual TermInfo file.
-func readTermInfo(path string) (*TermInfo, error) {
- // Open the terminfo file
- file, err := os.Open(path)
- defer file.Close()
- if err != nil {
- return nil, err
- }
-
- // magic, nameSize, boolSize, nrSNum, nrOffsetsStr, strSize
- // Header is composed of the magic 0432 octal number, size of the name
- // section, size of the boolean section, the amount of number values,
- // the number of offsets of strings, and the size of the string section.
- var header [6]int16
- // Byte array is used to read in byte values
- var byteArray []byte
- // Short array is used to read in short values
- var shArray []int16
- // TermInfo object to store values
- var term TermInfo
-
- // Read in the header
- err = binary.Read(file, binary.LittleEndian, &header)
- if err != nil {
- return nil, err
- }
- // If magic number isn't there or isn't correct, we have the wrong filetype
- if header[0] != 0432 {
- return nil, errors.New(fmt.Sprintf("Wrong filetype"))
- }
-
- // Read in the names
- byteArray = make([]byte, header[1])
- err = binary.Read(file, binary.LittleEndian, &byteArray)
- if err != nil {
- return nil, err
- }
- term.Names = strings.Split(string(byteArray), "|")
-
- // Read in the booleans
- byteArray = make([]byte, header[2])
- err = binary.Read(file, binary.LittleEndian, &byteArray)
- if err != nil {
- return nil, err
- }
- term.boolAttributes = make(map[string]bool)
- for i, b := range byteArray {
- if b == 1 {
- term.boolAttributes[BoolAttr[i*2+1]] = true
- }
- }
- // If the number of bytes read is not even, a byte for alignment is added
- if len(byteArray)%2 != 0 {
- err = binary.Read(file, binary.LittleEndian, make([]byte, 1))
- if err != nil {
- return nil, err
- }
- }
-
- // Read in shorts
- shArray = make([]int16, header[3])
- err = binary.Read(file, binary.LittleEndian, &shArray)
- if err != nil {
- return nil, err
- }
- term.numAttributes = make(map[string]int16)
- for i, n := range shArray {
- if n != 0377 && n > -1 {
- term.numAttributes[NumAttr[i*2+1]] = n
- }
- }
-
- // Read the offsets into the short array
- shArray = make([]int16, header[4])
- err = binary.Read(file, binary.LittleEndian, &shArray)
- if err != nil {
- return nil, err
- }
- // Read the actual strings in the byte array
- byteArray = make([]byte, header[5])
- err = binary.Read(file, binary.LittleEndian, &byteArray)
- if err != nil {
- return nil, err
- }
- term.strAttributes = make(map[string]string)
- // We get an offset, and then iterate until the string is null-terminated
- for i, offset := range shArray {
- if offset > -1 {
- r := offset
- for ; byteArray[r] != 0; r++ {
- }
- term.strAttributes[StrAttr[i*2+1]] = string(byteArray[offset:r])
- }
- }
- return &term, nil
-}
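
The deleted `OpenTermInfo` resolves a terminfo entry by honoring `$TERMINFO` first and then falling back to the ncurses-style directories (`~/.terminfo`, `/etc/terminfo`, `/lib/terminfo`, `/usr/share/terminfo`). A hedged sketch of how a caller used this API before the removal; treat it as illustrative only, since nothing in this diff shows the agent calling it directly:

```go
package main

import (
	"fmt"
	"log"

	gotty "github.com/Nvveen/Gotty" // removed from the vendor tree in this diff
)

func main() {
	// OpenTermInfoEnv reads $TERM and walks the same search order the
	// deleted code implements: $TERMINFO, then ~/.terminfo, /etc/terminfo,
	// /lib/terminfo and /usr/share/terminfo.
	ti, err := gotty.OpenTermInfoEnv()
	if err != nil {
		log.Fatalf("no terminfo entry: %v", err)
	}
	fmt.Println(ti.Names) // aliases from the entry's name section
}
```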
diff --git a/agent/vendor/github.com/Nvveen/Gotty/parser.go b/agent/vendor/github.com/Nvveen/Gotty/parser.go
deleted file mode 100644
index a9d5d23c542..00000000000
--- a/agent/vendor/github.com/Nvveen/Gotty/parser.go
+++ /dev/null
@@ -1,362 +0,0 @@
-// Copyright 2012 Neal van Veen. All rights reserved.
-// Usage of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package gotty
-
-import (
- "bytes"
- "errors"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-)
-
-var exp = [...]string{
- "%%",
- "%c",
- "%s",
- "%p(\\d)",
- "%P([A-z])",
- "%g([A-z])",
- "%'(.)'",
- "%{([0-9]+)}",
- "%l",
- "%\\+|%-|%\\*|%/|%m",
- "%&|%\\||%\\^",
- "%=|%>|%<",
- "%A|%O",
- "%!|%~",
- "%i",
- "%(:[\\ #\\-\\+]{0,4})?(\\d+\\.\\d+|\\d+)?[doxXs]",
- "%\\?(.*?);",
-}
-
-var regex *regexp.Regexp
-var staticVar map[byte]stacker
-
-// Parses the attribute that is received with name attr and parameters params.
-func (term *TermInfo) Parse(attr string, params ...interface{}) (string, error) {
- // Get the attribute name first.
- iface, err := term.GetAttribute(attr)
- str, ok := iface.(string)
- if err != nil {
- return "", err
- }
- if !ok {
- return str, errors.New("Only string capabilities can be parsed.")
- }
- // Construct the hidden parser struct so we can use a recursive stack based
- // parser.
- ps := &parser{}
- // Dynamic variables only exist in this context.
- ps.dynamicVar = make(map[byte]stacker, 26)
- ps.parameters = make([]stacker, len(params))
- // Convert the parameters to insert them into the parser struct.
- for i, x := range params {
- ps.parameters[i] = x
- }
- // Recursively walk and return.
- result, err := ps.walk(str)
- return result, err
-}
-
-// Parses the attribute that is received with name attr and parameters params.
-// Only works on full name of a capability that is given, which it uses to
-// search for the termcap name.
-func (term *TermInfo) ParseName(attr string, params ...interface{}) (string, error) {
- tc := GetTermcapName(attr)
-	return term.Parse(tc, params...)
-}
-
-// Identify each token in a stack based manner and do the actual parsing.
-func (ps *parser) walk(attr string) (string, error) {
- // We use a buffer to get the modified string.
- var buf bytes.Buffer
- // Next, find and identify all tokens by their indices and strings.
- tokens := regex.FindAllStringSubmatch(attr, -1)
- if len(tokens) == 0 {
- return attr, nil
- }
- indices := regex.FindAllStringIndex(attr, -1)
- q := 0 // q counts the matches of one token
- // Iterate through the string per character.
- for i := 0; i < len(attr); i++ {
- // If the current position is an identified token, execute the following
- // steps.
- if q < len(indices) && i >= indices[q][0] && i < indices[q][1] {
- // Switch on token.
- switch {
- case tokens[q][0][:2] == "%%":
- // Literal percentage character.
- buf.WriteByte('%')
- case tokens[q][0][:2] == "%c":
- // Pop a character.
- c, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- buf.WriteByte(c.(byte))
- case tokens[q][0][:2] == "%s":
- // Pop a string.
- str, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- if _, ok := str.(string); !ok {
- return buf.String(), errors.New("Stack head is not a string")
- }
- buf.WriteString(str.(string))
- case tokens[q][0][:2] == "%p":
- // Push a parameter on the stack.
- index, err := strconv.ParseInt(tokens[q][1], 10, 8)
- index--
- if err != nil {
- return buf.String(), err
- }
- if int(index) >= len(ps.parameters) {
- return buf.String(), errors.New("Parameters index out of bound")
- }
- ps.st.push(ps.parameters[index])
- case tokens[q][0][:2] == "%P":
- // Pop a variable from the stack as a dynamic or static variable.
- val, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- index := tokens[q][2]
- if len(index) > 1 {
- errorStr := fmt.Sprintf("%s is not a valid dynamic variables index",
- index)
- return buf.String(), errors.New(errorStr)
- }
- // Specify either dynamic or static.
- if index[0] >= 'a' && index[0] <= 'z' {
- ps.dynamicVar[index[0]] = val
- } else if index[0] >= 'A' && index[0] <= 'Z' {
- staticVar[index[0]] = val
- }
- case tokens[q][0][:2] == "%g":
- // Push a variable from the stack as a dynamic or static variable.
- index := tokens[q][3]
- if len(index) > 1 {
- errorStr := fmt.Sprintf("%s is not a valid static variables index",
- index)
- return buf.String(), errors.New(errorStr)
- }
- var val stacker
- if index[0] >= 'a' && index[0] <= 'z' {
- val = ps.dynamicVar[index[0]]
- } else if index[0] >= 'A' && index[0] <= 'Z' {
- val = staticVar[index[0]]
- }
- ps.st.push(val)
- case tokens[q][0][:2] == "%'":
- // Push a character constant.
- con := tokens[q][4]
- if len(con) > 1 {
- errorStr := fmt.Sprintf("%s is not a valid character constant", con)
- return buf.String(), errors.New(errorStr)
- }
- ps.st.push(con[0])
- case tokens[q][0][:2] == "%{":
- // Push an integer constant.
- con, err := strconv.ParseInt(tokens[q][5], 10, 32)
- if err != nil {
- return buf.String(), err
- }
- ps.st.push(con)
- case tokens[q][0][:2] == "%l":
- // Push the length of the string that is popped from the stack.
- popStr, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- if _, ok := popStr.(string); !ok {
- errStr := fmt.Sprintf("Stack head is not a string")
- return buf.String(), errors.New(errStr)
- }
- ps.st.push(len(popStr.(string)))
- case tokens[q][0][:2] == "%?":
- // If-then-else construct. First, the whole string is identified and
- // then inside this substring, we can specify which parts to switch on.
- ifReg, _ := regexp.Compile("%\\?(.*)%t(.*)%e(.*);|%\\?(.*)%t(.*);")
- ifTokens := ifReg.FindStringSubmatch(tokens[q][0])
- var (
- ifStr string
- err error
- )
- // Parse the if-part to determine if-else.
- if len(ifTokens[1]) > 0 {
- ifStr, err = ps.walk(ifTokens[1])
- } else { // else
- ifStr, err = ps.walk(ifTokens[4])
- }
- // Return any errors
- if err != nil {
- return buf.String(), err
- } else if len(ifStr) > 0 {
- // Self-defined limitation, not sure if this is correct, but didn't
- // seem like it.
- return buf.String(), errors.New("If-clause cannot print statements")
- }
- var thenStr string
- // Pop the first value that is set by parsing the if-clause.
- choose, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- // Switch to if or else.
- if choose.(int) == 0 && len(ifTokens[1]) > 0 {
- thenStr, err = ps.walk(ifTokens[3])
- } else if choose.(int) != 0 {
- if len(ifTokens[1]) > 0 {
- thenStr, err = ps.walk(ifTokens[2])
- } else {
- thenStr, err = ps.walk(ifTokens[5])
- }
- }
- if err != nil {
- return buf.String(), err
- }
- buf.WriteString(thenStr)
- case tokens[q][0][len(tokens[q][0])-1] == 'd': // Fallthrough for printing
- fallthrough
- case tokens[q][0][len(tokens[q][0])-1] == 'o': // digits.
- fallthrough
- case tokens[q][0][len(tokens[q][0])-1] == 'x':
- fallthrough
- case tokens[q][0][len(tokens[q][0])-1] == 'X':
- fallthrough
- case tokens[q][0][len(tokens[q][0])-1] == 's':
- token := tokens[q][0]
- // Remove the : that comes before a flag.
- if token[1] == ':' {
- token = token[:1] + token[2:]
- }
- digit, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- // The rest is determined like the normal formatted prints.
- digitStr := fmt.Sprintf(token, digit.(int))
- buf.WriteString(digitStr)
- case tokens[q][0][:2] == "%i":
- // Increment the parameters by one.
- if len(ps.parameters) < 2 {
- return buf.String(), errors.New("Not enough parameters to increment.")
- }
- val1, val2 := ps.parameters[0].(int), ps.parameters[1].(int)
- val1++
- val2++
- ps.parameters[0], ps.parameters[1] = val1, val2
- default:
- // The rest of the tokens is a special case, where two values are
- // popped and then operated on by the token that comes after them.
- op1, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- op2, err := ps.st.pop()
- if err != nil {
- return buf.String(), err
- }
- var result stacker
- switch tokens[q][0][:2] {
- case "%+":
- // Addition
- result = op2.(int) + op1.(int)
- case "%-":
- // Subtraction
- result = op2.(int) - op1.(int)
- case "%*":
- // Multiplication
- result = op2.(int) * op1.(int)
- case "%/":
- // Division
- result = op2.(int) / op1.(int)
- case "%m":
- // Modulo
- result = op2.(int) % op1.(int)
- case "%&":
- // Bitwise AND
- result = op2.(int) & op1.(int)
- case "%|":
- // Bitwise OR
- result = op2.(int) | op1.(int)
- case "%^":
- // Bitwise XOR
- result = op2.(int) ^ op1.(int)
- case "%=":
- // Equals
- result = op2 == op1
- case "%>":
- // Greater-than
- result = op2.(int) > op1.(int)
- case "%<":
- // Lesser-than
- result = op2.(int) < op1.(int)
- case "%A":
- // Logical AND
- result = op2.(bool) && op1.(bool)
- case "%O":
- // Logical OR
- result = op2.(bool) || op1.(bool)
- case "%!":
- // Logical complement
- result = !op1.(bool)
- case "%~":
- // Bitwise complement
- result = ^(op1.(int))
- }
- ps.st.push(result)
- }
-
- i = indices[q][1] - 1
- q++
- } else {
- // We are not "inside" a token, so just skip until the end or the next
- // token, and add all characters to the buffer.
- j := i
- if q != len(indices) {
- for !(j >= indices[q][0] && j < indices[q][1]) {
- j++
- }
- } else {
- j = len(attr)
- }
- buf.WriteString(string(attr[i:j]))
- i = j
- }
- }
- // Return the buffer as a string.
- return buf.String(), nil
-}
-
-// Push a stacker-value onto the stack.
-func (st *stack) push(s stacker) {
- *st = append(*st, s)
-}
-
-// Pop a stacker-value from the stack.
-func (st *stack) pop() (stacker, error) {
- if len(*st) == 0 {
- return nil, errors.New("Stack is empty.")
- }
- newStack := make(stack, len(*st)-1)
- val := (*st)[len(*st)-1]
- copy(newStack, (*st)[:len(*st)-1])
- *st = newStack
- return val, nil
-}
-
-// Initialize regexes and the static vars (that don't get changed between
-// calls).
-func init() {
- // Initialize the main regex.
- expStr := strings.Join(exp[:], "|")
- regex, _ = regexp.Compile(expStr)
- // Initialize the static variables.
- staticVar = make(map[byte]stacker, 26)
-}
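
`walk` above implements terminfo's stack-based `%` parameter language: `%p1`/`%p2` push parameters, `%i` increments the first two (for 1-based cursor coordinates), binary operators pop two operands, and `%d` pops and prints. A sketch of driving it through the deleted `Parse` method; the `cup` (cursor_address) capability string shown in the comment is the conventional one and is an assumption here, not taken from this diff:

```go
package main

import (
	"fmt"
	"log"

	gotty "github.com/Nvveen/Gotty" // removed from the vendor tree in this diff
)

func main() {
	ti, err := gotty.OpenTermInfoEnv()
	if err != nil {
		log.Fatal(err)
	}
	// For most terminals "cup" is "\x1b[%i%p1%d;%p2%dH": push row and
	// column, %i bumps both to 1-based, and each %d pops and prints.
	seq, err := ti.Parse("cup", 4, 10)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", seq) // typically "\x1b[5;11H"
}
```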
diff --git a/agent/vendor/github.com/Nvveen/Gotty/types.go b/agent/vendor/github.com/Nvveen/Gotty/types.go
deleted file mode 100644
index 9bcc65e9b88..00000000000
--- a/agent/vendor/github.com/Nvveen/Gotty/types.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2012 Neal van Veen. All rights reserved.
-// Usage of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package gotty
-
-type TermInfo struct {
- boolAttributes map[string]bool
- numAttributes map[string]int16
- strAttributes map[string]string
- // The various names of the TermInfo file.
- Names []string
-}
-
-type stacker interface {
-}
-type stack []stacker
-
-type parser struct {
- st stack
- parameters []stacker
- dynamicVar map[byte]stacker
-}
diff --git a/agent/vendor/github.com/beorn7/perks/LICENSE b/agent/vendor/github.com/beorn7/perks/LICENSE
new file mode 100644
index 00000000000..339177be663
--- /dev/null
+++ b/agent/vendor/github.com/beorn7/perks/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/agent/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/agent/vendor/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 00000000000..1602287d7ce
--- /dev/null
+++ b/agent/vendor/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/agent/vendor/github.com/beorn7/perks/quantile/stream.go b/agent/vendor/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 00000000000..d7d14f8eb63
--- /dev/null
+++ b/agent/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,316 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.5, 0.9, 0.99) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targetMap map[float64]float64) *Stream {
+ // Convert map to slice to avoid slow iterations on a map.
+ // ƒ is called on the hot path, so converting the map to a slice
+ // beforehand results in significant CPU savings.
+ targets := targetMapToSlice(targetMap)
+
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for _, t := range targets {
+ if t.quantile*s.n <= r {
+ f = (2 * t.epsilon * r) / t.quantile
+ } else {
+ f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
+
+type target struct {
+ quantile float64
+ epsilon float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+ targets := make([]target, 0, len(targetMap))
+
+ for quantile, epsilon := range targetMap {
+ t := target{
+ quantile: quantile,
+ epsilon: epsilon,
+ }
+ targets = append(targets, t)
+ }
+
+ return targets
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed qth percentile value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(math.Ceil(float64(l) * q))
+ if i > 0 {
+ i -= 1
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list, reusing the samples buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+ // all. Unittests show that the merging is inaccurate. Find out how to
+ // do merges properly.
+ var r float64
+ i := 0
+ for _, sample := range samples {
+ for ; i < len(s.l); i++ {
+ c := s.l[i]
+ if c.Value > sample.Value {
+ // Insert at position i.
+ s.l = append(s.l, Sample{})
+ copy(s.l[i+1:], s.l[i:])
+ s.l[i] = Sample{
+ sample.Value,
+ sample.Width,
+ math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+ // TODO(beorn7): How to calculate delta correctly?
+ }
+ i++
+ goto inserted
+ }
+ r += c.Width
+ }
+ s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+ i++
+ inserted:
+ s.n += sample.Width
+ r += sample.Width
+ }
+ s.compress()
+}
+
+func (s *stream) count() int {
+ return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+ t := math.Ceil(q * s.n)
+ t += math.Ceil(s.ƒ(s, t) / 2)
+ p := s.l[0]
+ var r float64
+ for _, c := range s.l[1:] {
+ r += p.Width
+ if r+c.Width+c.Delta > t {
+ return p.Value
+ }
+ p = c
+ }
+ return p.Value
+}
+
+func (s *stream) compress() {
+ if len(s.l) < 2 {
+ return
+ }
+ x := s.l[len(s.l)-1]
+ xi := len(s.l) - 1
+ r := s.n - 1 - x.Width
+
+ for i := len(s.l) - 2; i >= 0; i-- {
+ c := s.l[i]
+ if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+ x.Width += c.Width
+ s.l[xi] = x
+ // Remove element at i.
+ copy(s.l[i:], s.l[i+1:])
+ s.l = s.l[:len(s.l)-1]
+ xi -= 1
+ } else {
+ x = c
+ xi = i
+ }
+ r -= c.Width
+ }
+}
+
+func (s *stream) samples() Samples {
+ samples := make(Samples, len(s.l))
+ copy(samples, s.l)
+ return samples
+}
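
The `quantile` package added above computes targeted quantiles of an unbounded stream in bounded memory; it is presumably pulled in alongside the Prometheus client libraries, whose summaries build on it. A small usage sketch against only what the file itself exports (`NewTargeted`, `Insert`, `Query`, `Count`):

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and p99 with absolute errors of 0.005 and 0.001,
	// as the NewTargeted doc comment describes (Quantile ± Epsilon).
	s := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.99: 0.001,
	})
	for i := 0; i < 10000; i++ {
		s.Insert(rand.Float64() * 100)
	}
	fmt.Println("count:", s.Count())
	fmt.Println("p50:", s.Query(0.50))
	fmt.Println("p99:", s.Query(0.99))
}
```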
diff --git a/agent/vendor/github.com/go-ini/ini/.gitignore b/agent/vendor/github.com/go-ini/ini/.gitignore
deleted file mode 100644
index 12411127b39..00000000000
--- a/agent/vendor/github.com/go-ini/ini/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-testdata/conf_out.ini
-ini.sublime-project
-ini.sublime-workspace
-testdata/conf_reflect.ini
-.idea
-/.vscode
diff --git a/agent/vendor/github.com/go-ini/ini/.travis.yml b/agent/vendor/github.com/go-ini/ini/.travis.yml
deleted file mode 100644
index b097527e1a9..00000000000
--- a/agent/vendor/github.com/go-ini/ini/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-sudo: false
-language: go
-go:
- - 1.5.x
- - 1.6.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
-
-script:
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/smartystreets/goconvey
- - mkdir -p $HOME/gopath/src/gopkg.in
- - ln -s $HOME/gopath/src/github.com/go-ini/ini $HOME/gopath/src/gopkg.in/ini.v1
- - go test -v -cover -race
diff --git a/agent/vendor/github.com/go-ini/ini/LICENSE b/agent/vendor/github.com/go-ini/ini/LICENSE
deleted file mode 100644
index d361bbcdf5c..00000000000
--- a/agent/vendor/github.com/go-ini/ini/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
- Copyright 2014 Unknwon
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/agent/vendor/github.com/go-ini/ini/Makefile b/agent/vendor/github.com/go-ini/ini/Makefile
deleted file mode 100644
index 1316911d2df..00000000000
--- a/agent/vendor/github.com/go-ini/ini/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-.PHONY: build test bench vet coverage
-
-build: vet bench
-
-test:
- go test -v -cover -race
-
-bench:
- go test -v -cover -race -test.bench=. -test.benchmem
-
-vet:
- go vet
-
-coverage:
- go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out
\ No newline at end of file
diff --git a/agent/vendor/github.com/go-ini/ini/README.md b/agent/vendor/github.com/go-ini/ini/README.md
deleted file mode 100644
index f4ff27cd30c..00000000000
--- a/agent/vendor/github.com/go-ini/ini/README.md
+++ /dev/null
@@ -1,763 +0,0 @@
-INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge)
-===
-
-![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
-
-Package ini provides INI file read and write functionality in Go.
-
-[Simplified Chinese](README_ZH.md)
-
-## Feature
-
-- Load multiple data sources (`[]byte`, file and `io.ReadCloser`) with overwrites.
-- Read with recursive values.
-- Read with parent-child sections.
-- Read with auto-increment key names.
-- Read with multiple-line values.
-- Read with tons of helper methods.
-- Read and convert values to Go types.
-- Read and **WRITE** comments of sections and keys.
-- Manipulate sections, keys and comments with ease.
-- Keep sections and keys in order as you parse and save.
-
-## Installation
-
-To use a tagged revision:
-
- go get gopkg.in/ini.v1
-
-To use with latest changes:
-
- go get github.com/go-ini/ini
-
-Please add `-u` flag to update in the future.
-
-### Testing
-
-If you want to test on your machine, please apply `-t` flag:
-
- go get -t gopkg.in/ini.v1
-
-Please add `-u` flag to update in the future.
-
-## Getting Started
-
-### Loading from data sources
-
-A **Data Source** is either raw data in type `[]byte`, a file name with type `string`, or an `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error.
-
-```go
-cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
-```
-
-Or start with an empty object:
-
-```go
-cfg := ini.Empty()
-```
-
-When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later.
-
-```go
-err := cfg.Append("other file", []byte("other raw data"))
-```
-
-If you have a list of files, some of which may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
-
-```go
-cfg, err := ini.LooseLoad("filename", "filename_404")
-```
-
-The cool thing is, whenever a file becomes available to load while you're calling the `Reload` method, it will be loaded as usual.
-
-#### Ignore cases of key name
-
-When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing.
-
-```go
-cfg, err := ini.InsensitiveLoad("filename")
-//...
-
-// sec1 and sec2 are exactly the same section object
-sec1, err := cfg.GetSection("Section")
-sec2, err := cfg.GetSection("SecTIOn")
-
-// key1 and key2 are exactly the same key object
-key1, err := sec1.GetKey("Key")
-key2, err := sec2.GetKey("KeY")
-```
-
-#### MySQL-like boolean key
-
-MySQL's configuration allows a key without a value, as follows:
-
-```ini
-[mysqld]
-...
-skip-host-cache
-skip-name-resolve
-```
-
-By default, this is considered a missing value. But if you know you're going to deal with such cases, you can assign advanced load options:
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
-```
-
-The values of those keys are always `true`, and when you save to a file, they will be kept in the same format as you read them.
-
-To generate such keys in your program, you could use `NewBooleanKey`:
-
-```go
-key, err := sec.NewBooleanKey("skip-host-cache")
-```
-
-#### Comment
-
-Take care that the following formats will be treated as comments:
-
-1. Line begins with `#` or `;`
-2. Words after `#` or `;`
-3. Words after the section name (i.e. words after `[some section name]`)
-
-If you want to save a value with `#` or `;`, please quote them with ``` ` ``` or ``` """ ```.
-
-Alternatively, you can use the following `LoadOptions` to completely ignore inline comments:
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, "app.ini")
-```
-
-### Working with sections
-
-To get a section, you would need to:
-
-```go
-section, err := cfg.GetSection("section name")
-```
-
-As a shortcut for the default section, just give an empty string as the name:
-
-```go
-section, err := cfg.GetSection("")
-```
-
-When you're pretty sure the section exists, the following code could make your life easier:
-
-```go
-section := cfg.Section("section name")
-```
-
-What happens when the section somehow does not exist? Don't panic; it automatically creates and returns a new section to you.
-
-To create a new section:
-
-```go
-err := cfg.NewSection("new section")
-```
-
-To get a list of sections or section names:
-
-```go
-sections := cfg.Sections()
-names := cfg.SectionStrings()
-```
-
-### Working with keys
-
-To get a key under a section:
-
-```go
-key, err := cfg.Section("").GetKey("key name")
-```
-
-Same rule applies to key operations:
-
-```go
-key := cfg.Section("").Key("key name")
-```
-
-To check if a key exists:
-
-```go
-yes := cfg.Section("").HasKey("key name")
-```
-
-To create a new key:
-
-```go
-err := cfg.Section("").NewKey("name", "value")
-```
-
-To get a list of keys or key names:
-
-```go
-keys := cfg.Section("").Keys()
-names := cfg.Section("").KeyStrings()
-```
-
-To get a clone hash of keys and corresponding values:
-
-```go
-hash := cfg.Section("").KeysHash()
-```
-
-### Working with values
-
-To get a string value:
-
-```go
-val := cfg.Section("").Key("key name").String()
-```
-
-To validate key value on the fly:
-
-```go
-val := cfg.Section("").Key("key name").Validate(func(in string) string {
- if len(in) == 0 {
- return "default"
- }
- return in
-})
-```
-
-If you do not want any auto-transformation (such as recursive reads) for the values, you can get the raw value directly (this way you get much better performance):
-
-```go
-val := cfg.Section("").Key("key name").Value()
-```
-
-To check if raw value exists:
-
-```go
-yes := cfg.Section("").HasValue("test value")
-```
-
-To get value with types:
-
-```go
-// For boolean values:
-// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
-// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
-v, err = cfg.Section("").Key("BOOL").Bool()
-v, err = cfg.Section("").Key("FLOAT64").Float64()
-v, err = cfg.Section("").Key("INT").Int()
-v, err = cfg.Section("").Key("INT64").Int64()
-v, err = cfg.Section("").Key("UINT").Uint()
-v, err = cfg.Section("").Key("UINT64").Uint64()
-v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
-v, err = cfg.Section("").Key("TIME").Time() // RFC3339
-
-v = cfg.Section("").Key("BOOL").MustBool()
-v = cfg.Section("").Key("FLOAT64").MustFloat64()
-v = cfg.Section("").Key("INT").MustInt()
-v = cfg.Section("").Key("INT64").MustInt64()
-v = cfg.Section("").Key("UINT").MustUint()
-v = cfg.Section("").Key("UINT64").MustUint64()
-v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
-v = cfg.Section("").Key("TIME").MustTime() // RFC3339
-
-// Methods starting with Must also accept one argument as the default value
-// for when the key is not found or its value fails to parse to the given type.
-// The exception is MustString, to which you must always pass a default value.
-
-v = cfg.Section("").Key("String").MustString("default")
-v = cfg.Section("").Key("BOOL").MustBool(true)
-v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
-v = cfg.Section("").Key("INT").MustInt(10)
-v = cfg.Section("").Key("INT64").MustInt64(99)
-v = cfg.Section("").Key("UINT").MustUint(3)
-v = cfg.Section("").Key("UINT64").MustUint64(6)
-v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
-v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
-```
-
-What if my value is three-line long?
-
-```ini
-[advance]
-ADDRESS = """404 road,
-NotFound, State, 5000
-Earth"""
-```
-
-Not a problem!
-
-```go
-cfg.Section("advance").Key("ADDRESS").String()
-
-/* --- start ---
-404 road,
-NotFound, State, 5000
-Earth
------- end --- */
-```
-
-That's cool, how about continuation lines?
-
-```ini
-[advance]
-two_lines = how about \
- continuation lines?
-lots_of_lines = 1 \
- 2 \
- 3 \
- 4
-```
-
-Piece of cake!
-
-```go
-cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
-cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
-```
-
-Well, I hate continuation lines, how do I disable that?
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{
- IgnoreContinuation: true,
-}, "filename")
-```
-
-Holy crap!
-
-Note that single quotes around values will be stripped:
-
-```ini
-foo = "some value" // foo: some value
-bar = 'some value' // bar: some value
-```
-
-Sometimes you downloaded file from [Crowdin](https://crowdin.com/) has values like the following (value is surrounded by double quotes and quotes in the value are escaped):
-
-```ini
-create_repo="created repository %s"
-```
-
-How do you transform this to regular format automatically?
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{UnescapeValueDoubleQuotes: true}, "en-US.ini"))
-cfg.Section("").Key("create_repo").String()
-// You got: created repository %s
-```
-
-That's all? Hmm, no.
-
-#### Helper methods of working with values
-
-To get value with given candidates:
-
-```go
-v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
-v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
-v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
-v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
-v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
-v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
-v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
-v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
-```
-
-Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates.
-
-To validate value in a given range:
-
-```go
-vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
-vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
-vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
-vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
-vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
-vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
-vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
-```
-
-##### Auto-split values into a slice
-
-To use zero value of type for invalid inputs:
-
-```go
-// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
-// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
-vals = cfg.Section("").Key("STRINGS").Strings(",")
-vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
-vals = cfg.Section("").Key("INTS").Ints(",")
-vals = cfg.Section("").Key("INT64S").Int64s(",")
-vals = cfg.Section("").Key("UINTS").Uints(",")
-vals = cfg.Section("").Key("UINT64S").Uint64s(",")
-vals = cfg.Section("").Key("TIMES").Times(",")
-```
-
-To exclude invalid values out of result slice:
-
-```go
-// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
-// Input: how, 2.2, are, you -> [2.2]
-vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
-vals = cfg.Section("").Key("INTS").ValidInts(",")
-vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
-vals = cfg.Section("").Key("UINTS").ValidUints(",")
-vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
-vals = cfg.Section("").Key("TIMES").ValidTimes(",")
-```
-
-Or to return nothing but error when have invalid inputs:
-
-```go
-// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
-// Input: how, 2.2, are, you -> error
-vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
-vals = cfg.Section("").Key("INTS").StrictInts(",")
-vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
-vals = cfg.Section("").Key("UINTS").StrictUints(",")
-vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
-vals = cfg.Section("").Key("TIMES").StrictTimes(",")
-```
-
-### Save your configuration
-
-Finally, it's time to save your configuration to somewhere.
-
-A typical way to save configuration is writing it to a file:
-
-```go
-// ...
-err = cfg.SaveTo("my.ini")
-err = cfg.SaveToIndent("my.ini", "\t")
-```
-
-Another way to save is writing to a `io.Writer` interface:
-
-```go
-// ...
-cfg.WriteTo(writer)
-cfg.WriteToIndent(writer, "\t")
-```
-
-By default, spaces are used to align "=" sign between key and values, to disable that:
-
-```go
-ini.PrettyFormat = false
-```
-
-## Advanced Usage
-
-### Recursive Values
-
-For all value of keys, there is a special syntax `%()s`, where `` is the key name in same section or default section, and `%()s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions.
-
-```ini
-NAME = ini
-
-[author]
-NAME = Unknwon
-GITHUB = https://github.com/%(NAME)s
-
-[package]
-FULL_NAME = github.com/go-ini/%(NAME)s
-```
-
-```go
-cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
-cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
-```
-
-### Parent-child Sections
-
-You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section.
-
-```ini
-NAME = ini
-VERSION = v1
-IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
-
-[package]
-CLONE_URL = https://%(IMPORT_PATH)s
-
-[package.sub]
-```
-
-```go
-cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
-```
-
-#### Retrieve parent keys available to a child section
-
-```go
-cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
-```
-
-### Unparseable Sections
-
-Sometimes, you have sections that do not contain key-value pairs but raw content, to handle such case, you can use `LoadOptions.UnparsableSections`:
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
-<1> This slide has the fuel listed in the wrong units `))
-
-body := cfg.Section("COMMENTS").Body()
-
-/* --- start ---
-<1> This slide has the fuel listed in the wrong units
------- end --- */
-```
-
-### Auto-increment Key Names
-
-If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter.
-
-```ini
-[features]
--: Support read/write comments of keys and sections
--: Support auto-increment of key names
--: Support load multiple files to overwrite key values
-```
-
-```go
-cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
-```
-
-### Map To Struct
-
-Want more objective way to play with INI? Cool.
-
-```ini
-Name = Unknwon
-age = 21
-Male = true
-Born = 1993-01-01T20:17:05Z
-
-[Note]
-Content = Hi is a good man!
-Cities = HangZhou, Boston
-```
-
-```go
-type Note struct {
- Content string
- Cities []string
-}
-
-type Person struct {
- Name string
- Age int `ini:"age"`
- Male bool
- Born time.Time
- Note
- Created time.Time `ini:"-"`
-}
-
-func main() {
- cfg, err := ini.Load("path/to/ini")
- // ...
- p := new(Person)
- err = cfg.MapTo(p)
- // ...
-
- // Things can be simpler.
- err = ini.MapTo(p, "path/to/ini")
- // ...
-
- // Just map a section? Fine.
- n := new(Note)
- err = cfg.Section("Note").MapTo(n)
- // ...
-}
-```
-
-Can I have default value for field? Absolutely.
-
-Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type.
-
-```go
-// ...
-p := &Person{
- Name: "Joe",
-}
-// ...
-```
-
-It's really cool, but what's the point if you can't give me my file back from struct?
-
-### Reflect From Struct
-
-Why not?
-
-```go
-type Embeded struct {
- Dates []time.Time `delim:"|" comment:"Time data"`
- Places []string `ini:"places,omitempty"`
- None []int `ini:",omitempty"`
-}
-
-type Author struct {
- Name string `ini:"NAME"`
- Male bool
- Age int `comment:"Author's age"`
- GPA float64
- NeverMind string `ini:"-"`
- *Embeded `comment:"Embeded section"`
-}
-
-func main() {
- a := &Author{"Unknwon", true, 21, 2.8, "",
- &Embeded{
- []time.Time{time.Now(), time.Now()},
- []string{"HangZhou", "Boston"},
- []int{},
- }}
- cfg := ini.Empty()
- err = ini.ReflectFrom(cfg, a)
- // ...
-}
-```
-
-So, what do I get?
-
-```ini
-NAME = Unknwon
-Male = true
-; Author's age
-Age = 21
-GPA = 2.8
-
-; Embeded section
-[Embeded]
-; Time data
-Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
-places = HangZhou,Boston
-```
-
-#### Name Mapper
-
-To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name.
-
-There are 2 built-in name mappers:
-
-- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key.
-- `TitleUnderscore`: it converts to format `title_underscore` then match section or key.
-
-To use them:
-
-```go
-type Info struct {
- PackageName string
-}
-
-func main() {
- err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
- // ...
-
- cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
- // ...
- info := new(Info)
- cfg.NameMapper = ini.AllCapsUnderscore
- err = cfg.MapTo(info)
- // ...
-}
-```
-
-Same rules of name mapper apply to `ini.ReflectFromWithMapper` function.
-
-#### Value Mapper
-
-To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:
-
-```go
-type Env struct {
- Foo string `ini:"foo"`
-}
-
-func main() {
- cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n")
- cfg.ValueMapper = os.ExpandEnv
- // ...
- env := &Env{}
- err = cfg.Section("env").MapTo(env)
-}
-```
-
-This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.
-
-#### Other Notes On Map/Reflect
-
-Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature:
-
-```go
-type Child struct {
- Age string
-}
-
-type Parent struct {
- Name string
- Child
-}
-
-type Config struct {
- City string
- Parent
-}
-```
-
-Example configuration:
-
-```ini
-City = Boston
-
-[Parent]
-Name = Unknwon
-
-[Child]
-Age = 21
-```
-
-What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome.
-
-```go
-type Child struct {
- Age string
-}
-
-type Parent struct {
- Name string
- Child `ini:"Parent"`
-}
-
-type Config struct {
- City string
- Parent
-}
-```
-
-Example configuration:
-
-```ini
-City = Boston
-
-[Parent]
-Name = Unknwon
-Age = 21
-```
-
-## Getting Help
-
-- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
-- [File An Issue](https://github.com/go-ini/ini/issues/new)
-
-## FAQs
-
-### What does `BlockMode` field do?
-
-By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster.
-
-### Why another INI library?
-
-Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
-
-To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path)
-
-## License
-
-This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/agent/vendor/github.com/go-ini/ini/README_ZH.md b/agent/vendor/github.com/go-ini/ini/README_ZH.md
deleted file mode 100644
index 69aefef12e9..00000000000
--- a/agent/vendor/github.com/go-ini/ini/README_ZH.md
+++ /dev/null
@@ -1,750 +0,0 @@
-本包提供了 Go 语言中读写 INI 文件的功能。
-
-## 功能特性
-
-- 支持覆盖加载多个数据源(`[]byte`、文件和 `io.ReadCloser`)
-- 支持递归读取键值
-- 支持读取父子分区
-- 支持读取自增键名
-- 支持读取多行的键值
-- 支持大量辅助方法
-- 支持在读取时直接转换为 Go 语言类型
-- 支持读取和 **写入** 分区和键的注释
-- 轻松操作分区、键值和注释
-- 在保存文件时分区和键值会保持原有的顺序
-
-## 下载安装
-
-使用一个特定版本:
-
- go get gopkg.in/ini.v1
-
-使用最新版:
-
- go get github.com/go-ini/ini
-
-如需更新请添加 `-u` 选项。
-
-### 测试安装
-
-如果您想要在自己的机器上运行测试,请使用 `-t` 标记:
-
- go get -t gopkg.in/ini.v1
-
-如需更新请添加 `-u` 选项。
-
-## 开始使用
-
-### 从数据源加载
-
-一个 **数据源** 可以是 `[]byte` 类型的原始数据,`string` 类型的文件路径或 `io.ReadCloser`。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。
-
-```go
-cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
-```
-
-或者从一个空白的文件开始:
-
-```go
-cfg := ini.Empty()
-```
-
-当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。
-
-```go
-err := cfg.Append("other file", []byte("other raw data"))
-```
-
-当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误):
-
-```go
-cfg, err := ini.LooseLoad("filename", "filename_404")
-```
-
-更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。
-
-#### 忽略键名的大小写
-
-有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写:
-
-```go
-cfg, err := ini.InsensitiveLoad("filename")
-//...
-
-// sec1 和 sec2 指向同一个分区对象
-sec1, err := cfg.GetSection("Section")
-sec2, err := cfg.GetSection("SecTIOn")
-
-// key1 和 key2 指向同一个键对象
-key1, err := sec1.GetKey("Key")
-key2, err := sec2.GetKey("KeY")
-```
-
-#### 类似 MySQL 配置中的布尔值键
-
-MySQL 的配置文件中会出现没有具体值的布尔类型的键:
-
-```ini
-[mysqld]
-...
-skip-host-cache
-skip-name-resolve
-```
-
-默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理:
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf"))
-```
-
-这些键的值永远为 `true`,且在保存到文件时也只会输出键名。
-
-如果您想要通过程序来生成此类键,则可以使用 `NewBooleanKey`:
-
-```go
-key, err := sec.NewBooleanKey("skip-host-cache")
-```
-
-#### 关于注释
-
-下述几种情况的内容将被视为注释:
-
-1. 所有以 `#` 或 `;` 开头的行
-2. 所有在 `#` 或 `;` 之后的内容
-3. 分区标签后的文字 (即 `[分区名]` 之后的内容)
-
-如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。
-
-除此之外,您还可以通过 `LoadOptions` 完全忽略行内注释:
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, "app.ini"))
-```
-
-### 操作分区(Section)
-
-获取指定分区:
-
-```go
-section, err := cfg.GetSection("section name")
-```
-
-如果您想要获取默认分区,则可以用空字符串代替分区名:
-
-```go
-section, err := cfg.GetSection("")
-```
-
-当您非常确定某个分区是存在的,可以使用以下简便方法:
-
-```go
-section := cfg.Section("section name")
-```
-
-如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。
-
-创建一个分区:
-
-```go
-err := cfg.NewSection("new section")
-```
-
-获取所有分区对象或名称:
-
-```go
-sections := cfg.Sections()
-names := cfg.SectionStrings()
-```
-
-### 操作键(Key)
-
-获取某个分区下的键:
-
-```go
-key, err := cfg.Section("").GetKey("key name")
-```
-
-和分区一样,您也可以直接获取键而忽略错误处理:
-
-```go
-key := cfg.Section("").Key("key name")
-```
-
-判断某个键是否存在:
-
-```go
-yes := cfg.Section("").HasKey("key name")
-```
-
-创建一个新的键:
-
-```go
-err := cfg.Section("").NewKey("name", "value")
-```
-
-获取分区下的所有键或键名:
-
-```go
-keys := cfg.Section("").Keys()
-names := cfg.Section("").KeyStrings()
-```
-
-获取分区下的所有键值对的克隆:
-
-```go
-hash := cfg.Section("").KeysHash()
-```
-
-### 操作键值(Value)
-
-获取一个类型为字符串(string)的值:
-
-```go
-val := cfg.Section("").Key("key name").String()
-```
-
-获取值的同时通过自定义函数进行处理验证:
-
-```go
-val := cfg.Section("").Key("key name").Validate(func(in string) string {
- if len(in) == 0 {
- return "default"
- }
- return in
-})
-```
-
-如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳):
-
-```go
-val := cfg.Section("").Key("key name").Value()
-```
-
-判断某个原值是否存在:
-
-```go
-yes := cfg.Section("").HasValue("test value")
-```
-
-获取其它类型的值:
-
-```go
-// 布尔值的规则:
-// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
-// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
-v, err = cfg.Section("").Key("BOOL").Bool()
-v, err = cfg.Section("").Key("FLOAT64").Float64()
-v, err = cfg.Section("").Key("INT").Int()
-v, err = cfg.Section("").Key("INT64").Int64()
-v, err = cfg.Section("").Key("UINT").Uint()
-v, err = cfg.Section("").Key("UINT64").Uint64()
-v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
-v, err = cfg.Section("").Key("TIME").Time() // RFC3339
-
-v = cfg.Section("").Key("BOOL").MustBool()
-v = cfg.Section("").Key("FLOAT64").MustFloat64()
-v = cfg.Section("").Key("INT").MustInt()
-v = cfg.Section("").Key("INT64").MustInt64()
-v = cfg.Section("").Key("UINT").MustUint()
-v = cfg.Section("").Key("UINT64").MustUint64()
-v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
-v = cfg.Section("").Key("TIME").MustTime() // RFC3339
-
-// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值,
-// 当键不存在或者转换失败时,则会直接返回该默认值。
-// 但是,MustString 方法必须传递一个默认值。
-
-v = cfg.Seciont("").Key("String").MustString("default")
-v = cfg.Section("").Key("BOOL").MustBool(true)
-v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
-v = cfg.Section("").Key("INT").MustInt(10)
-v = cfg.Section("").Key("INT64").MustInt64(99)
-v = cfg.Section("").Key("UINT").MustUint(3)
-v = cfg.Section("").Key("UINT64").MustUint64(6)
-v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
-v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
-```
-
-如果我的值有好多行怎么办?
-
-```ini
-[advance]
-ADDRESS = """404 road,
-NotFound, State, 5000
-Earth"""
-```
-
-嗯哼?小 case!
-
-```go
-cfg.Section("advance").Key("ADDRESS").String()
-
-/* --- start ---
-404 road,
-NotFound, State, 5000
-Earth
------- end --- */
-```
-
-赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办?
-
-```ini
-[advance]
-two_lines = how about \
- continuation lines?
-lots_of_lines = 1 \
- 2 \
- 3 \
- 4
-```
-
-简直是小菜一碟!
-
-```go
-cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
-cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
-```
-
-可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢?
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{
- IgnoreContinuation: true,
-}, "filename")
-```
-
-哇靠给力啊!
-
-需要注意的是,值两侧的单引号会被自动剔除:
-
-```ini
-foo = "some value" // foo: some value
-bar = 'some value' // bar: some value
-```
-
-有时您会获得像从 [Crowdin](https://crowdin.com/) 网站下载的文件那样具有特殊格式的值(值使用双引号括起来,内部的双引号被转义):
-
-```ini
-create_repo="创建了仓库 %s"
-```
-
-那么,怎么自动地将这类值进行处理呢?
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{UnescapeValueDoubleQuotes: true}, "en-US.ini"))
-cfg.Section("").Key("create_repo").String()
-// You got: 创建了仓库 %s
-```
-
-这就是全部了?哈哈,当然不是。
-
-#### 操作键值的辅助方法
-
-获取键值时设定候选值:
-
-```go
-v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
-v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
-v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
-v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
-v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
-v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
-v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
-v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
-```
-
-如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。
-
-验证获取的值是否在指定范围内:
-
-```go
-vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
-vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
-vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
-vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
-vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
-vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
-vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
-```
-
-##### 自动分割键值到切片(slice)
-
-当存在无效输入时,使用零值代替:
-
-```go
-// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
-// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
-vals = cfg.Section("").Key("STRINGS").Strings(",")
-vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
-vals = cfg.Section("").Key("INTS").Ints(",")
-vals = cfg.Section("").Key("INT64S").Int64s(",")
-vals = cfg.Section("").Key("UINTS").Uints(",")
-vals = cfg.Section("").Key("UINT64S").Uint64s(",")
-vals = cfg.Section("").Key("TIMES").Times(",")
-```
-
-从结果切片中剔除无效输入:
-
-```go
-// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
-// Input: how, 2.2, are, you -> [2.2]
-vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
-vals = cfg.Section("").Key("INTS").ValidInts(",")
-vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
-vals = cfg.Section("").Key("UINTS").ValidUints(",")
-vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
-vals = cfg.Section("").Key("TIMES").ValidTimes(",")
-```
-
-当存在无效输入时,直接返回错误:
-
-```go
-// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
-// Input: how, 2.2, are, you -> error
-vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
-vals = cfg.Section("").Key("INTS").StrictInts(",")
-vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
-vals = cfg.Section("").Key("UINTS").StrictUints(",")
-vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
-vals = cfg.Section("").Key("TIMES").StrictTimes(",")
-```
-
-### 保存配置
-
-终于到了这个时刻,是时候保存一下配置了。
-
-比较原始的做法是输出配置到某个文件:
-
-```go
-// ...
-err = cfg.SaveTo("my.ini")
-err = cfg.SaveToIndent("my.ini", "\t")
-```
-
-另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中:
-
-```go
-// ...
-cfg.WriteTo(writer)
-cfg.WriteToIndent(writer, "\t")
-```
-
-默认情况下,空格将被用于对齐键值之间的等号以美化输出结果,以下代码可以禁用该功能:
-
-```go
-ini.PrettyFormat = false
-```
-
-## 高级用法
-
-### 递归读取键值
-
-在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。
-
-```ini
-NAME = ini
-
-[author]
-NAME = Unknwon
-GITHUB = https://github.com/%(NAME)s
-
-[package]
-FULL_NAME = github.com/go-ini/%(NAME)s
-```
-
-```go
-cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
-cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
-```
-
-### 读取父子分区
-
-您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。
-
-```ini
-NAME = ini
-VERSION = v1
-IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
-
-[package]
-CLONE_URL = https://%(IMPORT_PATH)s
-
-[package.sub]
-```
-
-```go
-cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
-```
-
-#### 获取上级父分区下的所有键名
-
-```go
-cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
-```
-
-### 无法解析的分区
-
-如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理:
-
-```go
-cfg, err := LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
-<1> This slide has the fuel listed in the wrong units `))
-
-body := cfg.Section("COMMENTS").Body()
-
-/* --- start ---
-<1> This slide has the fuel listed in the wrong units
------- end --- */
-```
-
-### 读取自增键名
-
-如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。
-
-```ini
-[features]
--: Support read/write comments of keys and sections
--: Support auto-increment of key names
--: Support load multiple files to overwrite key values
-```
-
-```go
-cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
-```
-
-### 映射到结构
-
-想要使用更加面向对象的方式玩转 INI 吗?好主意。
-
-```ini
-Name = Unknwon
-age = 21
-Male = true
-Born = 1993-01-01T20:17:05Z
-
-[Note]
-Content = Hi is a good man!
-Cities = HangZhou, Boston
-```
-
-```go
-type Note struct {
- Content string
- Cities []string
-}
-
-type Person struct {
- Name string
- Age int `ini:"age"`
- Male bool
- Born time.Time
- Note
- Created time.Time `ini:"-"`
-}
-
-func main() {
- cfg, err := ini.Load("path/to/ini")
- // ...
- p := new(Person)
- err = cfg.MapTo(p)
- // ...
-
- // 一切竟可以如此的简单。
- err = ini.MapTo(p, "path/to/ini")
- // ...
-
- // 嗯哼?只需要映射一个分区吗?
- n := new(Note)
- err = cfg.Section("Note").MapTo(n)
- // ...
-}
-```
-
-结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。
-
-```go
-// ...
-p := &Person{
- Name: "Joe",
-}
-// ...
-```
-
-这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用?
-
-### 从结构反射
-
-可是,我有说不能吗?
-
-```go
-type Embeded struct {
- Dates []time.Time `delim:"|" comment:"Time data"`
- Places []string `ini:"places,omitempty"`
- None []int `ini:",omitempty"`
-}
-
-type Author struct {
- Name string `ini:"NAME"`
- Male bool
- Age int `comment:"Author's age"`
- GPA float64
- NeverMind string `ini:"-"`
- *Embeded `comment:"Embeded section"`
-}
-
-func main() {
- a := &Author{"Unknwon", true, 21, 2.8, "",
- &Embeded{
- []time.Time{time.Now(), time.Now()},
- []string{"HangZhou", "Boston"},
- []int{},
- }}
- cfg := ini.Empty()
- err = ini.ReflectFrom(cfg, a)
- // ...
-}
-```
-
-瞧瞧,奇迹发生了。
-
-```ini
-NAME = Unknwon
-Male = true
-; Author's age
-Age = 21
-GPA = 2.8
-
-; Embeded section
-[Embeded]
-; Time data
-Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
-places = HangZhou,Boston
-```
-
-#### 名称映射器(Name Mapper)
-
-为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。
-
-目前有 2 款内置的映射器:
-
-- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。
-- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。
-
-使用方法:
-
-```go
-type Info struct{
- PackageName string
-}
-
-func main() {
- err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
- // ...
-
- cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
- // ...
- info := new(Info)
- cfg.NameMapper = ini.AllCapsUnderscore
- err = cfg.MapTo(info)
- // ...
-}
-```
-
-使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。
-
-#### 值映射器(Value Mapper)
-
-值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量:
-
-```go
-type Env struct {
- Foo string `ini:"foo"`
-}
-
-func main() {
- cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n")
- cfg.ValueMapper = os.ExpandEnv
- // ...
- env := &Env{}
- err = cfg.Section("env").MapTo(env)
-}
-```
-
-本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。
-
-#### 映射/反射的其它说明
-
-任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联:
-
-```go
-type Child struct {
- Age string
-}
-
-type Parent struct {
- Name string
- Child
-}
-
-type Config struct {
- City string
- Parent
-}
-```
-
-示例配置文件:
-
-```ini
-City = Boston
-
-[Parent]
-Name = Unknwon
-
-[Child]
-Age = 21
-```
-
-很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚!
-
-```go
-type Child struct {
- Age string
-}
-
-type Parent struct {
- Name string
- Child `ini:"Parent"`
-}
-
-type Config struct {
- City string
- Parent
-}
-```
-
-示例配置文件:
-
-```ini
-City = Boston
-
-[Parent]
-Name = Unknwon
-Age = 21
-```
-
-## 获取帮助
-
-- [API 文档](https://gowalker.org/gopkg.in/ini.v1)
-- [创建工单](https://github.com/go-ini/ini/issues/new)
-
-## 常见问题
-
-### 字段 `BlockMode` 是什么?
-
-默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。
-
-### 为什么要写另一个 INI 解析库?
-
-许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。
-
-为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了)
diff --git a/agent/vendor/github.com/go-ini/ini/error.go b/agent/vendor/github.com/go-ini/ini/error.go
deleted file mode 100644
index 80afe743158..00000000000
--- a/agent/vendor/github.com/go-ini/ini/error.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2016 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
- "fmt"
-)
-
-type ErrDelimiterNotFound struct {
- Line string
-}
-
-func IsErrDelimiterNotFound(err error) bool {
- _, ok := err.(ErrDelimiterNotFound)
- return ok
-}
-
-func (err ErrDelimiterNotFound) Error() string {
- return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
-}
diff --git a/agent/vendor/github.com/go-ini/ini/file.go b/agent/vendor/github.com/go-ini/ini/file.go
deleted file mode 100644
index 93ac50836cc..00000000000
--- a/agent/vendor/github.com/go-ini/ini/file.go
+++ /dev/null
@@ -1,392 +0,0 @@
-// Copyright 2017 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "strings"
- "sync"
-)
-
-// File represents a combination of a or more INI file(s) in memory.
-type File struct {
- options LoadOptions
- dataSources []dataSource
-
- // Should make things safe, but sometimes doesn't matter.
- BlockMode bool
- lock sync.RWMutex
-
- // To keep data in order.
- sectionList []string
- // Actual data is stored here.
- sections map[string]*Section
-
- NameMapper
- ValueMapper
-}
-
-// newFile initializes File object with given data sources.
-func newFile(dataSources []dataSource, opts LoadOptions) *File {
- return &File{
- BlockMode: true,
- dataSources: dataSources,
- sections: make(map[string]*Section),
- sectionList: make([]string, 0, 10),
- options: opts,
- }
-}
-
-// Empty returns an empty file object.
-func Empty() *File {
- // Ignore error here, we sure our data is good.
- f, _ := Load([]byte(""))
- return f
-}
-
-// NewSection creates a new section.
-func (f *File) NewSection(name string) (*Section, error) {
- if len(name) == 0 {
- return nil, errors.New("error creating new section: empty section name")
- } else if f.options.Insensitive && name != DEFAULT_SECTION {
- name = strings.ToLower(name)
- }
-
- if f.BlockMode {
- f.lock.Lock()
- defer f.lock.Unlock()
- }
-
- if inSlice(name, f.sectionList) {
- return f.sections[name], nil
- }
-
- f.sectionList = append(f.sectionList, name)
- f.sections[name] = newSection(f, name)
- return f.sections[name], nil
-}
-
-// NewRawSection creates a new section with an unparseable body.
-func (f *File) NewRawSection(name, body string) (*Section, error) {
- section, err := f.NewSection(name)
- if err != nil {
- return nil, err
- }
-
- section.isRawSection = true
- section.rawBody = body
- return section, nil
-}
-
-// NewSections creates a list of sections.
-func (f *File) NewSections(names ...string) (err error) {
- for _, name := range names {
- if _, err = f.NewSection(name); err != nil {
- return err
- }
- }
- return nil
-}
-
-// GetSection returns section by given name.
-func (f *File) GetSection(name string) (*Section, error) {
- if len(name) == 0 {
- name = DEFAULT_SECTION
- }
- if f.options.Insensitive {
- name = strings.ToLower(name)
- }
-
- if f.BlockMode {
- f.lock.RLock()
- defer f.lock.RUnlock()
- }
-
- sec := f.sections[name]
- if sec == nil {
- return nil, fmt.Errorf("section '%s' does not exist", name)
- }
- return sec, nil
-}
-
-// Section assumes named section exists and returns a zero-value when not.
-func (f *File) Section(name string) *Section {
- sec, err := f.GetSection(name)
- if err != nil {
- // Note: It's OK here because the only possible error is empty section name,
- // but if it's empty, this piece of code won't be executed.
- sec, _ = f.NewSection(name)
- return sec
- }
- return sec
-}
-
-// Section returns list of Section.
-func (f *File) Sections() []*Section {
- sections := make([]*Section, len(f.sectionList))
- for i := range f.sectionList {
- sections[i] = f.Section(f.sectionList[i])
- }
- return sections
-}
-
-// ChildSections returns a list of child sections of given section name.
-func (f *File) ChildSections(name string) []*Section {
- return f.Section(name).ChildSections()
-}
-
-// SectionStrings returns list of section names.
-func (f *File) SectionStrings() []string {
- list := make([]string, len(f.sectionList))
- copy(list, f.sectionList)
- return list
-}
-
-// DeleteSection deletes a section.
-func (f *File) DeleteSection(name string) {
- if f.BlockMode {
- f.lock.Lock()
- defer f.lock.Unlock()
- }
-
- if len(name) == 0 {
- name = DEFAULT_SECTION
- }
-
- for i, s := range f.sectionList {
- if s == name {
- f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
- delete(f.sections, name)
- return
- }
- }
-}
-
-func (f *File) reload(s dataSource) error {
- r, err := s.ReadCloser()
- if err != nil {
- return err
- }
- defer r.Close()
-
- return f.parse(r)
-}
-
-// Reload reloads and parses all data sources.
-func (f *File) Reload() (err error) {
- for _, s := range f.dataSources {
- if err = f.reload(s); err != nil {
- // In loose mode, we create an empty default section for nonexistent files.
- if os.IsNotExist(err) && f.options.Loose {
- f.parse(bytes.NewBuffer(nil))
- continue
- }
- return err
- }
- }
- return nil
-}
-
-// Append appends one or more data sources and reloads automatically.
-func (f *File) Append(source interface{}, others ...interface{}) error {
- ds, err := parseDataSource(source)
- if err != nil {
- return err
- }
- f.dataSources = append(f.dataSources, ds)
- for _, s := range others {
- ds, err = parseDataSource(s)
- if err != nil {
- return err
- }
- f.dataSources = append(f.dataSources, ds)
- }
- return f.Reload()
-}
-
-func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
- equalSign := "="
- if PrettyFormat {
- equalSign = " = "
- }
-
- // Use buffer to make sure target is safe until finish encoding.
- buf := bytes.NewBuffer(nil)
- for i, sname := range f.sectionList {
- sec := f.Section(sname)
- if len(sec.Comment) > 0 {
- if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
- sec.Comment = "; " + sec.Comment
- } else {
- sec.Comment = sec.Comment[:1] + " " + strings.TrimSpace(sec.Comment[1:])
- }
- if _, err := buf.WriteString(sec.Comment + LineBreak); err != nil {
- return nil, err
- }
- }
-
- if i > 0 || DefaultHeader {
- if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
- return nil, err
- }
- } else {
- // Write nothing if default section is empty
- if len(sec.keyList) == 0 {
- continue
- }
- }
-
- if sec.isRawSection {
- if _, err := buf.WriteString(sec.rawBody); err != nil {
- return nil, err
- }
-
- if PrettySection {
- // Put a line between sections
- if _, err := buf.WriteString(LineBreak); err != nil {
- return nil, err
- }
- }
- continue
- }
-
- // Count and generate alignment length and buffer spaces using the
- // longest key. Keys may be modifed if they contain certain characters so
- // we need to take that into account in our calculation.
- alignLength := 0
- if PrettyFormat {
- for _, kname := range sec.keyList {
- keyLength := len(kname)
- // First case will surround key by ` and second by """
- if strings.ContainsAny(kname, "\"=:") {
- keyLength += 2
- } else if strings.Contains(kname, "`") {
- keyLength += 6
- }
-
- if keyLength > alignLength {
- alignLength = keyLength
- }
- }
- }
- alignSpaces := bytes.Repeat([]byte(" "), alignLength)
-
- KEY_LIST:
- for _, kname := range sec.keyList {
- key := sec.Key(kname)
- if len(key.Comment) > 0 {
- if len(indent) > 0 && sname != DEFAULT_SECTION {
- buf.WriteString(indent)
- }
- if key.Comment[0] != '#' && key.Comment[0] != ';' {
- key.Comment = "; " + key.Comment
- } else {
- key.Comment = key.Comment[:1] + " " + strings.TrimSpace(key.Comment[1:])
- }
- if _, err := buf.WriteString(key.Comment + LineBreak); err != nil {
- return nil, err
- }
- }
-
- if len(indent) > 0 && sname != DEFAULT_SECTION {
- buf.WriteString(indent)
- }
-
- switch {
- case key.isAutoIncrement:
- kname = "-"
- case strings.ContainsAny(kname, "\"=:"):
- kname = "`" + kname + "`"
- case strings.Contains(kname, "`"):
- kname = `"""` + kname + `"""`
- }
-
- for _, val := range key.ValueWithShadows() {
- if _, err := buf.WriteString(kname); err != nil {
- return nil, err
- }
-
- if key.isBooleanType {
- if kname != sec.keyList[len(sec.keyList)-1] {
- buf.WriteString(LineBreak)
- }
- continue KEY_LIST
- }
-
- // Write out alignment spaces before "=" sign
- if PrettyFormat {
- buf.Write(alignSpaces[:alignLength-len(kname)])
- }
-
- // In case key value contains "\n", "`", "\"", "#" or ";"
- if strings.ContainsAny(val, "\n`") {
- val = `"""` + val + `"""`
- } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
- val = "`" + val + "`"
- }
- if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
- return nil, err
- }
- }
- }
-
- if PrettySection {
- // Put a line between sections
- if _, err := buf.WriteString(LineBreak); err != nil {
- return nil, err
- }
- }
- }
-
- return buf, nil
-}
-
-// WriteToIndent writes content into io.Writer with given indention.
-// If PrettyFormat has been set to be true,
-// it will align "=" sign with spaces under each section.
-func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
- buf, err := f.writeToBuffer(indent)
- if err != nil {
- return 0, err
- }
- return buf.WriteTo(w)
-}
-
-// WriteTo writes file content into io.Writer.
-func (f *File) WriteTo(w io.Writer) (int64, error) {
- return f.WriteToIndent(w, "")
-}
-
-// SaveToIndent writes content to file system with given value indention.
-func (f *File) SaveToIndent(filename, indent string) error {
- // Note: Because we are truncating with os.Create,
- // so it's safer to save to a temporary file location and rename afte done.
- buf, err := f.writeToBuffer(indent)
- if err != nil {
- return err
- }
-
- return ioutil.WriteFile(filename, buf.Bytes(), 0666)
-}
-
-// SaveTo writes content to file system.
-func (f *File) SaveTo(filename string) error {
- return f.SaveToIndent(filename, "")
-}
diff --git a/agent/vendor/github.com/go-ini/ini/ini.go b/agent/vendor/github.com/go-ini/ini/ini.go
deleted file mode 100644
index cd7c8a13556..00000000000
--- a/agent/vendor/github.com/go-ini/ini/ini.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-// Package ini provides INI file read and write functionality in Go.
-package ini
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "regexp"
- "runtime"
-)
-
-const (
- // Name for default section. You can use this constant or the string literal.
- // In most of cases, an empty string is all you need to access the section.
- DEFAULT_SECTION = "DEFAULT"
-
- // Maximum allowed depth when recursively substituing variable names.
- _DEPTH_VALUES = 99
- _VERSION = "1.31.1"
-)
-
-// Version returns current package version literal.
-func Version() string {
- return _VERSION
-}
-
-var (
- // Delimiter to determine or compose a new line.
- // This variable will be changed to "\r\n" automatically on Windows
- // at package init time.
- LineBreak = "\n"
-
- // Variable regexp pattern: %(variable)s
- varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
-
- // Indicate whether to align "=" sign with spaces to produce pretty output
- // or reduce all possible spaces for compact format.
- PrettyFormat = true
-
- // Explicitly write DEFAULT section header
- DefaultHeader = false
-
- // Indicate whether to put a line between sections
- PrettySection = true
-)
-
-func init() {
- if runtime.GOOS == "windows" {
- LineBreak = "\r\n"
- }
-}
-
-func inSlice(str string, s []string) bool {
- for _, v := range s {
- if str == v {
- return true
- }
- }
- return false
-}
-
-// dataSource is an interface that returns object which can be read and closed.
-type dataSource interface {
- ReadCloser() (io.ReadCloser, error)
-}
-
-// sourceFile represents an object that contains content on the local file system.
-type sourceFile struct {
- name string
-}
-
-func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
- return os.Open(s.name)
-}
-
-// sourceData represents an object that contains content in memory.
-type sourceData struct {
- data []byte
-}
-
-func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
- return ioutil.NopCloser(bytes.NewReader(s.data)), nil
-}
-
-// sourceReadCloser represents an input stream with Close method.
-type sourceReadCloser struct {
- reader io.ReadCloser
-}
-
-func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
- return s.reader, nil
-}
-
-func parseDataSource(source interface{}) (dataSource, error) {
- switch s := source.(type) {
- case string:
- return sourceFile{s}, nil
- case []byte:
- return &sourceData{s}, nil
- case io.ReadCloser:
- return &sourceReadCloser{s}, nil
- default:
- return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
- }
-}
-
-type LoadOptions struct {
- // Loose indicates whether the parser should ignore nonexistent files or return error.
- Loose bool
- // Insensitive indicates whether the parser forces all section and key names to lowercase.
- Insensitive bool
- // IgnoreContinuation indicates whether to ignore continuation lines while parsing.
- IgnoreContinuation bool
- // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
- IgnoreInlineComment bool
- // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
- // This type of keys are mostly used in my.cnf.
- AllowBooleanKeys bool
- // AllowShadows indicates whether to keep track of keys with same name under same section.
- AllowShadows bool
- // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
- // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
- UnescapeValueDoubleQuotes bool
- // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format
- // when value is NOT surrounded by any quotes.
- // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all.
- UnescapeValueCommentSymbols bool
- // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
- // conform to key/value pairs. Specify the names of those blocks here.
- UnparseableSections []string
-}
-
-func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
- sources := make([]dataSource, len(others)+1)
- sources[0], err = parseDataSource(source)
- if err != nil {
- return nil, err
- }
- for i := range others {
- sources[i+1], err = parseDataSource(others[i])
- if err != nil {
- return nil, err
- }
- }
- f := newFile(sources, opts)
- if err = f.Reload(); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// Load loads and parses from INI data sources.
-// Arguments can be mixed of file name with string type, or raw data in []byte.
-// It will return error if list contains nonexistent files.
-func Load(source interface{}, others ...interface{}) (*File, error) {
- return LoadSources(LoadOptions{}, source, others...)
-}
-
-// LooseLoad has exactly same functionality as Load function
-// except it ignores nonexistent files instead of returning error.
-func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
- return LoadSources(LoadOptions{Loose: true}, source, others...)
-}
-
-// InsensitiveLoad has exactly same functionality as Load function
-// except it forces all section and key names to be lowercased.
-func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
- return LoadSources(LoadOptions{Insensitive: true}, source, others...)
-}
-
-// InsensitiveLoad has exactly same functionality as Load function
-// except it allows have shadow keys.
-func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
- return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
-}
diff --git a/agent/vendor/github.com/go-ini/ini/key.go b/agent/vendor/github.com/go-ini/ini/key.go
deleted file mode 100644
index d3eac4776c1..00000000000
--- a/agent/vendor/github.com/go-ini/ini/key.go
+++ /dev/null
@@ -1,727 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
- "bytes"
- "errors"
- "fmt"
- "strconv"
- "strings"
- "time"
-)
-
-// Key represents a key under a section.
-type Key struct {
- s *Section
- Comment string
- name string
- value string
- isAutoIncrement bool
- isBooleanType bool
-
- isShadow bool
- shadows []*Key
-}
-
-// newKey simply return a key object with given values.
-func newKey(s *Section, name, val string) *Key {
- return &Key{
- s: s,
- name: name,
- value: val,
- }
-}
-
-func (k *Key) addShadow(val string) error {
- if k.isShadow {
- return errors.New("cannot add shadow to another shadow key")
- } else if k.isAutoIncrement || k.isBooleanType {
- return errors.New("cannot add shadow to auto-increment or boolean key")
- }
-
- shadow := newKey(k.s, k.name, val)
- shadow.isShadow = true
- k.shadows = append(k.shadows, shadow)
- return nil
-}
-
-// AddShadow adds a new shadow key to itself.
-func (k *Key) AddShadow(val string) error {
- if !k.s.f.options.AllowShadows {
- return errors.New("shadow key is not allowed")
- }
- return k.addShadow(val)
-}
-
-// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
-type ValueMapper func(string) string
-
-// Name returns name of key.
-func (k *Key) Name() string {
- return k.name
-}
-
-// Value returns raw value of key for performance purpose.
-func (k *Key) Value() string {
- return k.value
-}
-
-// ValueWithShadows returns raw values of key and its shadows if any.
-func (k *Key) ValueWithShadows() []string {
- if len(k.shadows) == 0 {
- return []string{k.value}
- }
- vals := make([]string, len(k.shadows)+1)
- vals[0] = k.value
- for i := range k.shadows {
- vals[i+1] = k.shadows[i].value
- }
- return vals
-}
-
-// transformValue takes a raw value and transforms to its final string.
-func (k *Key) transformValue(val string) string {
- if k.s.f.ValueMapper != nil {
- val = k.s.f.ValueMapper(val)
- }
-
- // Fail-fast if no indicate char found for recursive value
- if !strings.Contains(val, "%") {
- return val
- }
- for i := 0; i < _DEPTH_VALUES; i++ {
- vr := varPattern.FindString(val)
- if len(vr) == 0 {
- break
- }
-
- // Take off leading '%(' and trailing ')s'.
- noption := strings.TrimLeft(vr, "%(")
- noption = strings.TrimRight(noption, ")s")
-
- // Search in the same section.
- nk, err := k.s.GetKey(noption)
- if err != nil || k == nk {
- // Search again in default section.
- nk, _ = k.s.f.Section("").GetKey(noption)
- }
-
- // Substitute by new value and take off leading '%(' and trailing ')s'.
- val = strings.Replace(val, vr, nk.value, -1)
- }
- return val
-}
-
-// String returns string representation of value.
-func (k *Key) String() string {
- return k.transformValue(k.value)
-}
-
-// Validate accepts a validate function which can
-// return modifed result as key value.
-func (k *Key) Validate(fn func(string) string) string {
- return fn(k.String())
-}
-
-// parseBool returns the boolean value represented by the string.
-//
-// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
-// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
-// Any other value returns an error.
-func parseBool(str string) (value bool, err error) {
- switch str {
- case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
- return true, nil
- case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
- return false, nil
- }
- return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
-}
-
-// Bool returns bool type value.
-func (k *Key) Bool() (bool, error) {
- return parseBool(k.String())
-}
-
-// Float64 returns float64 type value.
-func (k *Key) Float64() (float64, error) {
- return strconv.ParseFloat(k.String(), 64)
-}
-
-// Int returns int type value.
-func (k *Key) Int() (int, error) {
- return strconv.Atoi(k.String())
-}
-
-// Int64 returns int64 type value.
-func (k *Key) Int64() (int64, error) {
- return strconv.ParseInt(k.String(), 10, 64)
-}
-
-// Uint returns uint type valued.
-func (k *Key) Uint() (uint, error) {
- u, e := strconv.ParseUint(k.String(), 10, 64)
- return uint(u), e
-}
-
-// Uint64 returns uint64 type value.
-func (k *Key) Uint64() (uint64, error) {
- return strconv.ParseUint(k.String(), 10, 64)
-}
-
-// Duration returns time.Duration type value.
-func (k *Key) Duration() (time.Duration, error) {
- return time.ParseDuration(k.String())
-}
-
-// TimeFormat parses with given format and returns time.Time type value.
-func (k *Key) TimeFormat(format string) (time.Time, error) {
- return time.Parse(format, k.String())
-}
-
-// Time parses with RFC3339 format and returns time.Time type value.
-func (k *Key) Time() (time.Time, error) {
- return k.TimeFormat(time.RFC3339)
-}
-
-// MustString returns default value if key value is empty.
-func (k *Key) MustString(defaultVal string) string {
- val := k.String()
- if len(val) == 0 {
- k.value = defaultVal
- return defaultVal
- }
- return val
-}
-
-// MustBool always returns value without error,
-// it returns false if error occurs.
-func (k *Key) MustBool(defaultVal ...bool) bool {
- val, err := k.Bool()
- if len(defaultVal) > 0 && err != nil {
- k.value = strconv.FormatBool(defaultVal[0])
- return defaultVal[0]
- }
- return val
-}
-
-// MustFloat64 always returns value without error,
-// it returns 0.0 if error occurs.
-func (k *Key) MustFloat64(defaultVal ...float64) float64 {
- val, err := k.Float64()
- if len(defaultVal) > 0 && err != nil {
- k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
- return defaultVal[0]
- }
- return val
-}
-
-// MustInt always returns value without error,
-// it returns 0 if error occurs.
-func (k *Key) MustInt(defaultVal ...int) int {
- val, err := k.Int()
- if len(defaultVal) > 0 && err != nil {
- k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
- return defaultVal[0]
- }
- return val
-}
-
-// MustInt64 always returns value without error,
-// it returns 0 if error occurs.
-func (k *Key) MustInt64(defaultVal ...int64) int64 {
- val, err := k.Int64()
- if len(defaultVal) > 0 && err != nil {
- k.value = strconv.FormatInt(defaultVal[0], 10)
- return defaultVal[0]
- }
- return val
-}
-
-// MustUint always returns value without error,
-// it returns 0 if error occurs.
-func (k *Key) MustUint(defaultVal ...uint) uint {
- val, err := k.Uint()
- if len(defaultVal) > 0 && err != nil {
- k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
- return defaultVal[0]
- }
- return val
-}
-
-// MustUint64 always returns value without error,
-// it returns 0 if error occurs.
-func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
- val, err := k.Uint64()
- if len(defaultVal) > 0 && err != nil {
- k.value = strconv.FormatUint(defaultVal[0], 10)
- return defaultVal[0]
- }
- return val
-}
-
-// MustDuration always returns value without error,
-// it returns zero value if error occurs.
-func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
- val, err := k.Duration()
- if len(defaultVal) > 0 && err != nil {
- k.value = defaultVal[0].String()
- return defaultVal[0]
- }
- return val
-}
-
-// MustTimeFormat always parses with given format and returns value without error,
-// it returns zero value if error occurs.
-func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
- val, err := k.TimeFormat(format)
- if len(defaultVal) > 0 && err != nil {
- k.value = defaultVal[0].Format(format)
- return defaultVal[0]
- }
- return val
-}
-
-// MustTime always parses with RFC3339 format and returns value without error,
-// it returns zero value if error occurs.
-func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
- return k.MustTimeFormat(time.RFC3339, defaultVal...)
-}
-
-// In always returns value without error,
-// it returns default value if error occurs or doesn't fit into candidates.
-func (k *Key) In(defaultVal string, candidates []string) string {
- val := k.String()
- for _, cand := range candidates {
- if val == cand {
- return val
- }
- }
- return defaultVal
-}
-
-// InFloat64 always returns value without error,
-// it returns default value if error occurs or doesn't fit into candidates.
-func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
- val := k.MustFloat64()
- for _, cand := range candidates {
- if val == cand {
- return val
- }
- }
- return defaultVal
-}
-
-// InInt always returns value without error,
-// it returns default value if error occurs or doesn't fit into candidates.
-func (k *Key) InInt(defaultVal int, candidates []int) int {
- val := k.MustInt()
- for _, cand := range candidates {
- if val == cand {
- return val
- }
- }
- return defaultVal
-}
-
-// InInt64 always returns value without error,
-// it returns default value if error occurs or doesn't fit into candidates.
-func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
- val := k.MustInt64()
- for _, cand := range candidates {
- if val == cand {
- return val
- }
- }
- return defaultVal
-}
-
-// InUint always returns value without error,
-// it returns default value if error occurs or doesn't fit into candidates.
-func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
- val := k.MustUint()
- for _, cand := range candidates {
- if val == cand {
- return val
- }
- }
- return defaultVal
-}
-
-// InUint64 always returns value without error,
-// it returns default value if error occurs or doesn't fit into candidates.
-func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
- val := k.MustUint64()
- for _, cand := range candidates {
- if val == cand {
- return val
- }
- }
- return defaultVal
-}
-
-// InTimeFormat always parses with given format and returns value without error,
-// it returns default value if error occurs or doesn't fit into candidates.
-func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
- val := k.MustTimeFormat(format)
- for _, cand := range candidates {
- if val == cand {
- return val
- }
- }
- return defaultVal
-}
-
-// InTime always parses with RFC3339 format and returns value without error,
-// it returns default value if error occurs or doesn't fit into candidates.
-func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
- return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
-}
-
-// RangeFloat64 checks if value is in given range inclusively,
-// and returns default value if it's not.
-func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
- val := k.MustFloat64()
- if val < min || val > max {
- return defaultVal
- }
- return val
-}
-
-// RangeInt checks if value is in given range inclusively,
-// and returns default value if it's not.
-func (k *Key) RangeInt(defaultVal, min, max int) int {
- val := k.MustInt()
- if val < min || val > max {
- return defaultVal
- }
- return val
-}
-
-// RangeInt64 checks if value is in given range inclusively,
-// and returns default value if it's not.
-func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
- val := k.MustInt64()
- if val < min || val > max {
- return defaultVal
- }
- return val
-}
-
-// RangeTimeFormat checks if value with given format is in given range inclusively,
-// and returns default value if it's not.
-func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
- val := k.MustTimeFormat(format)
- if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
- return defaultVal
- }
- return val
-}
-
-// RangeTime checks if the value, parsed with the RFC3339 format, is in the given range (inclusive),
-// and returns the default value if it is not.
-func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
- return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
-}
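
The In* and Range* accessors above never return an error: a value that is not among the candidates, or that falls outside the inclusive [min, max] range, is silently replaced by the supplied default. A minimal sketch of this behaviour, assuming the go-ini package as vendored here (the INI content and key names are illustrative):

```go
package main

import (
	"fmt"

	ini "github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("mode = grpc\nretries = 7\n"))
	if err != nil {
		panic(err)
	}
	sec := cfg.Section("") // the default section

	// "grpc" is among the candidates, so it is returned as-is.
	fmt.Println(sec.Key("mode").In("http", []string{"http", "grpc"})) // grpc

	// 7 falls outside the inclusive range [1, 5], so the default 3 is returned.
	fmt.Println(sec.Key("retries").RangeInt(3, 1, 5)) // 3
}
```
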
-
-// Strings returns list of string divided by given delimiter.
-func (k *Key) Strings(delim string) []string {
- str := k.String()
- if len(str) == 0 {
- return []string{}
- }
-
- runes := []rune(str)
- vals := make([]string, 0, 2)
- var buf bytes.Buffer
- escape := false
- idx := 0
- for {
- if escape {
- escape = false
- if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) {
- buf.WriteRune('\\')
- }
- buf.WriteRune(runes[idx])
- } else {
- if runes[idx] == '\\' {
- escape = true
- } else if strings.HasPrefix(string(runes[idx:]), delim) {
- idx += len(delim) - 1
- vals = append(vals, strings.TrimSpace(buf.String()))
- buf.Reset()
- } else {
- buf.WriteRune(runes[idx])
- }
- }
- idx += 1
- if idx == len(runes) {
- break
- }
- }
-
- if buf.Len() > 0 {
- vals = append(vals, strings.TrimSpace(buf.String()))
- }
-
- return vals
-}
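
Strings splits on the delimiter but honours backslash escapes, so a delimiter can appear inside an element, and surrounding whitespace is trimmed from each element. A short sketch under the same assumptions:

```go
package main

import (
	"fmt"

	ini "github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte(`peers = a, b\,c , d`))
	if err != nil {
		panic(err)
	}
	// The escaped comma stays inside the second element.
	fmt.Println(cfg.Section("").Key("peers").Strings(",")) // [a b,c d]
}
```
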
-
-// StringsWithShadows returns list of string divided by given delimiter.
-// Shadows will also be appended, if any.
-func (k *Key) StringsWithShadows(delim string) []string {
- vals := k.ValueWithShadows()
- results := make([]string, 0, len(vals)*2)
- for i := range vals {
- if len(vals) == 0 {
- continue
- }
-
- results = append(results, strings.Split(vals[i], delim)...)
- }
-
- for i := range results {
- results[i] = k.transformValue(strings.TrimSpace(results[i]))
- }
- return results
-}
-
-// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
-func (k *Key) Float64s(delim string) []float64 {
- vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
- return vals
-}
-
-// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
-func (k *Key) Ints(delim string) []int {
- vals, _ := k.parseInts(k.Strings(delim), true, false)
- return vals
-}
-
-// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
-func (k *Key) Int64s(delim string) []int64 {
- vals, _ := k.parseInt64s(k.Strings(delim), true, false)
- return vals
-}
-
-// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
-func (k *Key) Uints(delim string) []uint {
- vals, _ := k.parseUints(k.Strings(delim), true, false)
- return vals
-}
-
-// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
-func (k *Key) Uint64s(delim string) []uint64 {
- vals, _ := k.parseUint64s(k.Strings(delim), true, false)
- return vals
-}
-
-// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
-// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
-func (k *Key) TimesFormat(format, delim string) []time.Time {
- vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
- return vals
-}
-
-// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
-// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
-func (k *Key) Times(delim string) []time.Time {
- return k.TimesFormat(time.RFC3339, delim)
-}
-
-// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
-// it will not be included to result list.
-func (k *Key) ValidFloat64s(delim string) []float64 {
- vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
- return vals
-}
-
-// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
-// not be included to result list.
-func (k *Key) ValidInts(delim string) []int {
- vals, _ := k.parseInts(k.Strings(delim), false, false)
- return vals
-}
-
-// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
-// then it will not be included to result list.
-func (k *Key) ValidInt64s(delim string) []int64 {
- vals, _ := k.parseInt64s(k.Strings(delim), false, false)
- return vals
-}
-
-// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
-// then it will not be included to result list.
-func (k *Key) ValidUints(delim string) []uint {
- vals, _ := k.parseUints(k.Strings(delim), false, false)
- return vals
-}
-
-// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
-// integer, then it will not be included to result list.
-func (k *Key) ValidUint64s(delim string) []uint64 {
- vals, _ := k.parseUint64s(k.Strings(delim), false, false)
- return vals
-}
-
-// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
-func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
- vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
- return vals
-}
-
-// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
-func (k *Key) ValidTimes(delim string) []time.Time {
- return k.ValidTimesFormat(time.RFC3339, delim)
-}
-
-// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
-func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
- return k.parseFloat64s(k.Strings(delim), false, true)
-}
-
-// StrictInts returns list of int divided by given delimiter or error on first invalid input.
-func (k *Key) StrictInts(delim string) ([]int, error) {
- return k.parseInts(k.Strings(delim), false, true)
-}
-
-// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
-func (k *Key) StrictInt64s(delim string) ([]int64, error) {
- return k.parseInt64s(k.Strings(delim), false, true)
-}
-
-// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
-func (k *Key) StrictUints(delim string) ([]uint, error) {
- return k.parseUints(k.Strings(delim), false, true)
-}
-
-// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
-func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
- return k.parseUint64s(k.Strings(delim), false, true)
-}
-
-// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
-// or error on first invalid input.
-func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
- return k.parseTimesFormat(format, k.Strings(delim), false, true)
-}
-
-// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
-// or error on first invalid input.
-func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
- return k.StrictTimesFormat(time.RFC3339, delim)
-}
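
The list accessors above come in three strictness levels, all built on the parse helpers that follow: the plain form keeps invalid entries as zero values, the Valid* form drops them, and the Strict* form fails on the first invalid entry. A sketch under the same assumptions:

```go
package main

import (
	"fmt"

	ini "github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("ports = 80,abc,443\n"))
	if err != nil {
		panic(err)
	}
	key := cfg.Section("").Key("ports")

	fmt.Println(key.Ints(","))      // [80 0 443] -- invalid entry becomes the zero value
	fmt.Println(key.ValidInts(",")) // [80 443]   -- invalid entry is skipped
	_, err = key.StrictInts(",")
	fmt.Println(err != nil) // true -- the first invalid entry aborts with an error
}
```
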
-
-// parseFloat64s transforms strings to float64s.
-func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
- vals := make([]float64, 0, len(strs))
- for _, str := range strs {
- val, err := strconv.ParseFloat(str, 64)
- if err != nil && returnOnInvalid {
- return nil, err
- }
- if err == nil || addInvalid {
- vals = append(vals, val)
- }
- }
- return vals, nil
-}
-
-// parseInts transforms strings to ints.
-func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
- vals := make([]int, 0, len(strs))
- for _, str := range strs {
- val, err := strconv.Atoi(str)
- if err != nil && returnOnInvalid {
- return nil, err
- }
- if err == nil || addInvalid {
- vals = append(vals, val)
- }
- }
- return vals, nil
-}
-
-// parseInt64s transforms strings to int64s.
-func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
- vals := make([]int64, 0, len(strs))
- for _, str := range strs {
- val, err := strconv.ParseInt(str, 10, 64)
- if err != nil && returnOnInvalid {
- return nil, err
- }
- if err == nil || addInvalid {
- vals = append(vals, val)
- }
- }
- return vals, nil
-}
-
-// parseUints transforms strings to uints.
-func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
- vals := make([]uint, 0, len(strs))
- for _, str := range strs {
- val, err := strconv.ParseUint(str, 10, 0)
- if err != nil && returnOnInvalid {
- return nil, err
- }
- if err == nil || addInvalid {
- vals = append(vals, uint(val))
- }
- }
- return vals, nil
-}
-
-// parseUint64s transforms strings to uint64s.
-func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
- vals := make([]uint64, 0, len(strs))
- for _, str := range strs {
- val, err := strconv.ParseUint(str, 10, 64)
- if err != nil && returnOnInvalid {
- return nil, err
- }
- if err == nil || addInvalid {
- vals = append(vals, val)
- }
- }
- return vals, nil
-}
-
-// parseTimesFormat transforms strings to times in given format.
-func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
- vals := make([]time.Time, 0, len(strs))
- for _, str := range strs {
- val, err := time.Parse(format, str)
- if err != nil && returnOnInvalid {
- return nil, err
- }
- if err == nil || addInvalid {
- vals = append(vals, val)
- }
- }
- return vals, nil
-}
-
-// SetValue changes key value.
-func (k *Key) SetValue(v string) {
- if k.s.f.BlockMode {
- k.s.f.lock.Lock()
- defer k.s.f.lock.Unlock()
- }
-
- k.value = v
- k.s.keysHash[k.name] = v
-}
diff --git a/agent/vendor/github.com/go-ini/ini/parser.go b/agent/vendor/github.com/go-ini/ini/parser.go
deleted file mode 100644
index 6bd3cd34014..00000000000
--- a/agent/vendor/github.com/go-ini/ini/parser.go
+++ /dev/null
@@ -1,387 +0,0 @@
-// Copyright 2015 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "strconv"
- "strings"
- "unicode"
-)
-
-type tokenType int
-
-const (
- _TOKEN_INVALID tokenType = iota
- _TOKEN_COMMENT
- _TOKEN_SECTION
- _TOKEN_KEY
-)
-
-type parser struct {
- buf *bufio.Reader
- isEOF bool
- count int
- comment *bytes.Buffer
-}
-
-func newParser(r io.Reader) *parser {
- return &parser{
- buf: bufio.NewReader(r),
- count: 1,
- comment: &bytes.Buffer{},
- }
-}
-
-// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format.
-// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
-func (p *parser) BOM() error {
- mask, err := p.buf.Peek(2)
- if err != nil && err != io.EOF {
- return err
- } else if len(mask) < 2 {
- return nil
- }
-
- switch {
- case mask[0] == 254 && mask[1] == 255:
- fallthrough
- case mask[0] == 255 && mask[1] == 254:
- p.buf.Read(mask)
- case mask[0] == 239 && mask[1] == 187:
- mask, err := p.buf.Peek(3)
- if err != nil && err != io.EOF {
- return err
- } else if len(mask) < 3 {
- return nil
- }
- if mask[2] == 191 {
- p.buf.Read(mask)
- }
- }
- return nil
-}
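
The BOM handler above strips UTF-8 (EF BB BF) and UTF-16 byte order marks before parsing, so sources saved by BOM-writing editors load transparently. A quick sketch under the same assumptions:

```go
package main

import (
	"fmt"

	ini "github.com/go-ini/ini"
)

func main() {
	// A UTF-8 BOM followed by ordinary INI content.
	src := append([]byte{0xEF, 0xBB, 0xBF}, []byte("name = agent\n")...)
	cfg, err := ini.Load(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Section("").Key("name").String()) // agent
}
```
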
-
-func (p *parser) readUntil(delim byte) ([]byte, error) {
- data, err := p.buf.ReadBytes(delim)
- if err != nil {
- if err == io.EOF {
- p.isEOF = true
- } else {
- return nil, err
- }
- }
- return data, nil
-}
-
-func cleanComment(in []byte) ([]byte, bool) {
- i := bytes.IndexAny(in, "#;")
- if i == -1 {
- return nil, false
- }
- return in[i:], true
-}
-
-func readKeyName(in []byte) (string, int, error) {
- line := string(in)
-
- // Check if key name surrounded by quotes.
- var keyQuote string
- if line[0] == '"' {
- if len(line) > 6 && string(line[0:3]) == `"""` {
- keyQuote = `"""`
- } else {
- keyQuote = `"`
- }
- } else if line[0] == '`' {
- keyQuote = "`"
- }
-
- // Extract the key name
- endIdx := -1
- if len(keyQuote) > 0 {
- startIdx := len(keyQuote)
- // FIXME: fail case -> """"""name"""=value
- pos := strings.Index(line[startIdx:], keyQuote)
- if pos == -1 {
- return "", -1, fmt.Errorf("missing closing key quote: %s", line)
- }
- pos += startIdx
-
- // Find key-value delimiter
- i := strings.IndexAny(line[pos+startIdx:], "=:")
- if i < 0 {
- return "", -1, ErrDelimiterNotFound{line}
- }
- endIdx = pos + i
- return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
- }
-
- endIdx = strings.IndexAny(line, "=:")
- if endIdx < 0 {
- return "", -1, ErrDelimiterNotFound{line}
- }
- return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
-}
-
-func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
- for {
- data, err := p.readUntil('\n')
- if err != nil {
- return "", err
- }
- next := string(data)
-
- pos := strings.LastIndex(next, valQuote)
- if pos > -1 {
- val += next[:pos]
-
- comment, has := cleanComment([]byte(next[pos:]))
- if has {
- p.comment.Write(bytes.TrimSpace(comment))
- }
- break
- }
- val += next
- if p.isEOF {
- return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
- }
- }
- return val, nil
-}
-
-func (p *parser) readContinuationLines(val string) (string, error) {
- for {
- data, err := p.readUntil('\n')
- if err != nil {
- return "", err
- }
- next := strings.TrimSpace(string(data))
-
- if len(next) == 0 {
- break
- }
- val += next
- if val[len(val)-1] != '\\' {
- break
- }
- val = val[:len(val)-1]
- }
- return val, nil
-}
-
-// hasSurroundedQuote checks whether the first and last characters
-// are the given quote (\" or \').
-// It returns false if any other part of the string contains the same kind of quote.
-func hasSurroundedQuote(in string, quote byte) bool {
- return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
- strings.IndexByte(in[1:], quote) == len(in)-2
-}
-
-func (p *parser) readValue(in []byte,
- ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes, unescapeValueCommentSymbols bool) (string, error) {
-
- line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
- if len(line) == 0 {
- return "", nil
- }
-
- var valQuote string
- if len(line) > 3 && string(line[0:3]) == `"""` {
- valQuote = `"""`
- } else if line[0] == '`' {
- valQuote = "`"
- } else if unescapeValueDoubleQuotes && line[0] == '"' {
- valQuote = `"`
- }
-
- if len(valQuote) > 0 {
- startIdx := len(valQuote)
- pos := strings.LastIndex(line[startIdx:], valQuote)
- // Check for multi-line value
- if pos == -1 {
- return p.readMultilines(line, line[startIdx:], valQuote)
- }
-
- if unescapeValueDoubleQuotes && valQuote == `"` {
- return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
- }
- return line[startIdx : pos+startIdx], nil
- }
-
- // Won't be able to reach here if value only contains whitespace
- line = strings.TrimSpace(line)
-
- // Check continuation lines when desired
- if !ignoreContinuation && line[len(line)-1] == '\\' {
- return p.readContinuationLines(line[:len(line)-1])
- }
-
- // Check if ignore inline comment
- if !ignoreInlineComment {
- i := strings.IndexAny(line, "#;")
- if i > -1 {
- p.comment.WriteString(line[i:])
- line = strings.TrimSpace(line[:i])
- }
- }
-
- // Trim single and double quotes
- if hasSurroundedQuote(line, '\'') ||
- hasSurroundedQuote(line, '"') {
- line = line[1 : len(line)-1]
- } else if len(valQuote) == 0 && unescapeValueCommentSymbols {
- if strings.Contains(line, `\;`) {
- line = strings.Replace(line, `\;`, ";", -1)
- }
- if strings.Contains(line, `\#`) {
- line = strings.Replace(line, `\#`, "#", -1)
- }
- }
- return line, nil
-}
-
-// parse parses data through an io.Reader.
-func (f *File) parse(reader io.Reader) (err error) {
- p := newParser(reader)
- if err = p.BOM(); err != nil {
- return fmt.Errorf("BOM: %v", err)
- }
-
- // Ignore error because default section name is never empty string.
- name := DEFAULT_SECTION
- if f.options.Insensitive {
- name = strings.ToLower(DEFAULT_SECTION)
- }
- section, _ := f.NewSection(name)
-
- var line []byte
- var inUnparseableSection bool
- for !p.isEOF {
- line, err = p.readUntil('\n')
- if err != nil {
- return err
- }
-
- line = bytes.TrimLeftFunc(line, unicode.IsSpace)
- if len(line) == 0 {
- continue
- }
-
- // Comments
- if line[0] == '#' || line[0] == ';' {
- // Note: we keep the trailing line break here because
- // it is needed when a second comment line is appended;
- // it is cleaned up once at the end when the comment is assigned to a key or section.
- p.comment.Write(line)
- continue
- }
-
- // Section
- if line[0] == '[' {
- // Read to the next ']' (TODO: support quoted strings)
- // TODO(unknwon): use LastIndexByte when stop supporting Go1.4
- closeIdx := bytes.LastIndex(line, []byte("]"))
- if closeIdx == -1 {
- return fmt.Errorf("unclosed section: %s", line)
- }
-
- name := string(line[1:closeIdx])
- section, err = f.NewSection(name)
- if err != nil {
- return err
- }
-
- comment, has := cleanComment(line[closeIdx+1:])
- if has {
- p.comment.Write(comment)
- }
-
- section.Comment = strings.TrimSpace(p.comment.String())
-
- // Reset auto-counter and comments
- p.comment.Reset()
- p.count = 1
-
- inUnparseableSection = false
- for i := range f.options.UnparseableSections {
- if f.options.UnparseableSections[i] == name ||
- (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
- inUnparseableSection = true
- continue
- }
- }
- continue
- }
-
- if inUnparseableSection {
- section.isRawSection = true
- section.rawBody += string(line)
- continue
- }
-
- kname, offset, err := readKeyName(line)
- if err != nil {
- // Treat as boolean key when desired, and whole line is key name.
- if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
- kname, err := p.readValue(line,
- f.options.IgnoreContinuation,
- f.options.IgnoreInlineComment,
- f.options.UnescapeValueDoubleQuotes,
- f.options.UnescapeValueCommentSymbols)
- if err != nil {
- return err
- }
- key, err := section.NewBooleanKey(kname)
- if err != nil {
- return err
- }
- key.Comment = strings.TrimSpace(p.comment.String())
- p.comment.Reset()
- continue
- }
- return err
- }
-
- // Auto increment.
- isAutoIncr := false
- if kname == "-" {
- isAutoIncr = true
- kname = "#" + strconv.Itoa(p.count)
- p.count++
- }
-
- value, err := p.readValue(line[offset:],
- f.options.IgnoreContinuation,
- f.options.IgnoreInlineComment,
- f.options.UnescapeValueDoubleQuotes,
- f.options.UnescapeValueCommentSymbols)
- if err != nil {
- return err
- }
-
- key, err := section.NewKey(kname, value)
- if err != nil {
- return err
- }
- key.isAutoIncrement = isAutoIncr
- key.Comment = strings.TrimSpace(p.comment.String())
- p.comment.Reset()
- }
- return nil
-}
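
Two behaviours of the parse loop above are easy to miss: a key written as "-" is auto-numbered ("#1", "#2", ...), and, with AllowBooleanKeys enabled, a line with no key-value delimiter becomes a boolean key whose value is "true". A sketch; the AllowBooleanKeys flag is read straight from the code above, while the ini.LoadSources/ini.LoadOptions entry point is an assumption about the package's public API:

```go
package main

import (
	"fmt"

	ini "github.com/go-ini/ini"
)

func main() {
	src := []byte("[features]\n- = first\n- = second\nskip-verify\n")
	// LoadSources/LoadOptions are assumed here; AllowBooleanKeys matches the parser flag above.
	cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, src)
	if err != nil {
		panic(err)
	}
	sec := cfg.Section("features")
	fmt.Println(sec.Key("#1").String())          // first
	fmt.Println(sec.Key("#2").String())          // second
	fmt.Println(sec.Key("skip-verify").String()) // true (boolean keys are stored as "true")
}
```
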
diff --git a/agent/vendor/github.com/go-ini/ini/section.go b/agent/vendor/github.com/go-ini/ini/section.go
deleted file mode 100644
index d8a40261920..00000000000
--- a/agent/vendor/github.com/go-ini/ini/section.go
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
- "errors"
- "fmt"
- "strings"
-)
-
-// Section represents a config section.
-type Section struct {
- f *File
- Comment string
- name string
- keys map[string]*Key
- keyList []string
- keysHash map[string]string
-
- isRawSection bool
- rawBody string
-}
-
-func newSection(f *File, name string) *Section {
- return &Section{
- f: f,
- name: name,
- keys: make(map[string]*Key),
- keyList: make([]string, 0, 10),
- keysHash: make(map[string]string),
- }
-}
-
-// Name returns name of Section.
-func (s *Section) Name() string {
- return s.name
-}
-
-// Body returns rawBody of Section if the section was marked as unparseable.
-// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
-func (s *Section) Body() string {
- return strings.TrimSpace(s.rawBody)
-}
-
-// SetBody updates body content only if section is raw.
-func (s *Section) SetBody(body string) {
- if !s.isRawSection {
- return
- }
- s.rawBody = body
-}
-
-// NewKey creates a new key to given section.
-func (s *Section) NewKey(name, val string) (*Key, error) {
- if len(name) == 0 {
- return nil, errors.New("error creating new key: empty key name")
- } else if s.f.options.Insensitive {
- name = strings.ToLower(name)
- }
-
- if s.f.BlockMode {
- s.f.lock.Lock()
- defer s.f.lock.Unlock()
- }
-
- if inSlice(name, s.keyList) {
- if s.f.options.AllowShadows {
- if err := s.keys[name].addShadow(val); err != nil {
- return nil, err
- }
- } else {
- s.keys[name].value = val
- }
- return s.keys[name], nil
- }
-
- s.keyList = append(s.keyList, name)
- s.keys[name] = newKey(s, name, val)
- s.keysHash[name] = val
- return s.keys[name], nil
-}
-
-// NewBooleanKey creates a new boolean type key to given section.
-func (s *Section) NewBooleanKey(name string) (*Key, error) {
- key, err := s.NewKey(name, "true")
- if err != nil {
- return nil, err
- }
-
- key.isBooleanType = true
- return key, nil
-}
-
-// GetKey returns key in section by given name.
-func (s *Section) GetKey(name string) (*Key, error) {
- // FIXME: change to section level lock?
- if s.f.BlockMode {
- s.f.lock.RLock()
- }
- if s.f.options.Insensitive {
- name = strings.ToLower(name)
- }
- key := s.keys[name]
- if s.f.BlockMode {
- s.f.lock.RUnlock()
- }
-
- if key == nil {
- // Check if it is a child-section.
- sname := s.name
- for {
- if i := strings.LastIndex(sname, "."); i > -1 {
- sname = sname[:i]
- sec, err := s.f.GetSection(sname)
- if err != nil {
- continue
- }
- return sec.GetKey(name)
- } else {
- break
- }
- }
- return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
- }
- return key, nil
-}
-
-// HasKey returns true if section contains a key with given name.
-func (s *Section) HasKey(name string) bool {
- key, _ := s.GetKey(name)
- return key != nil
-}
-
-// Haskey is a backwards-compatible name for HasKey.
-// TODO: delete me in v2
-func (s *Section) Haskey(name string) bool {
- return s.HasKey(name)
-}
-
-// HasValue returns true if section contains given raw value.
-func (s *Section) HasValue(value string) bool {
- if s.f.BlockMode {
- s.f.lock.RLock()
- defer s.f.lock.RUnlock()
- }
-
- for _, k := range s.keys {
- if value == k.value {
- return true
- }
- }
- return false
-}
-
-// Key assumes named Key exists in section and returns a zero-value when not.
-func (s *Section) Key(name string) *Key {
- key, err := s.GetKey(name)
- if err != nil {
- // It's OK here because the only possible error is empty key name,
- // but if it's empty, this piece of code won't be executed.
- key, _ = s.NewKey(name, "")
- return key
- }
- return key
-}
-
-// Keys returns list of keys of section.
-func (s *Section) Keys() []*Key {
- keys := make([]*Key, len(s.keyList))
- for i := range s.keyList {
- keys[i] = s.Key(s.keyList[i])
- }
- return keys
-}
-
-// ParentKeys returns list of keys of parent section.
-func (s *Section) ParentKeys() []*Key {
- var parentKeys []*Key
- sname := s.name
- for {
- if i := strings.LastIndex(sname, "."); i > -1 {
- sname = sname[:i]
- sec, err := s.f.GetSection(sname)
- if err != nil {
- continue
- }
- parentKeys = append(parentKeys, sec.Keys()...)
- } else {
- break
- }
-
- }
- return parentKeys
-}
-
-// KeyStrings returns list of key names of section.
-func (s *Section) KeyStrings() []string {
- list := make([]string, len(s.keyList))
- copy(list, s.keyList)
- return list
-}
-
-// KeysHash returns keys hash consisting of names and values.
-func (s *Section) KeysHash() map[string]string {
- if s.f.BlockMode {
- s.f.lock.RLock()
- defer s.f.lock.RUnlock()
- }
-
- hash := map[string]string{}
- for key, value := range s.keysHash {
- hash[key] = value
- }
- return hash
-}
-
-// DeleteKey deletes a key from section.
-func (s *Section) DeleteKey(name string) {
- if s.f.BlockMode {
- s.f.lock.Lock()
- defer s.f.lock.Unlock()
- }
-
- for i, k := range s.keyList {
- if k == name {
- s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
- delete(s.keys, name)
- return
- }
- }
-}
-
-// ChildSections returns a list of child sections of current section.
-// For example, "[parent.child1]" and "[parent.child12]" are child sections
-// of section "[parent]".
-func (s *Section) ChildSections() []*Section {
- prefix := s.name + "."
- children := make([]*Section, 0, 3)
- for _, name := range s.f.sectionList {
- if strings.HasPrefix(name, prefix) {
- children = append(children, s.f.sections[name])
- }
- }
- return children
-}
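
GetKey above walks up dotted section names, so a child section transparently inherits keys from its ancestors, while ChildSections enumerates the other direction. A sketch under the same assumptions:

```go
package main

import (
	"fmt"

	ini "github.com/go-ini/ini"
)

func main() {
	src := []byte("[server]\ntimeout = 30\n[server.http]\nport = 8080\n")
	cfg, err := ini.Load(src)
	if err != nil {
		panic(err)
	}

	child := cfg.Section("server.http")
	fmt.Println(child.Key("port").String())    // 8080, defined on the child itself
	fmt.Println(child.Key("timeout").String()) // 30, inherited from [server]

	for _, s := range cfg.Section("server").ChildSections() {
		fmt.Println(s.Name()) // server.http
	}
}
```
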
diff --git a/agent/vendor/github.com/go-ini/ini/struct.go b/agent/vendor/github.com/go-ini/ini/struct.go
deleted file mode 100644
index 9719dc6985a..00000000000
--- a/agent/vendor/github.com/go-ini/ini/struct.go
+++ /dev/null
@@ -1,512 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
- "bytes"
- "errors"
- "fmt"
- "reflect"
- "strings"
- "time"
- "unicode"
-)
-
-// NameMapper represents a ini tag name mapper.
-type NameMapper func(string) string
-
-// Built-in name getters.
-var (
- // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
- AllCapsUnderscore NameMapper = func(raw string) string {
- newstr := make([]rune, 0, len(raw))
- for i, chr := range raw {
- if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
- if i > 0 {
- newstr = append(newstr, '_')
- }
- }
- newstr = append(newstr, unicode.ToUpper(chr))
- }
- return string(newstr)
- }
- // TitleUnderscore converts to format title_underscore.
- TitleUnderscore NameMapper = func(raw string) string {
- newstr := make([]rune, 0, len(raw))
- for i, chr := range raw {
- if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
- if i > 0 {
- newstr = append(newstr, '_')
- }
- chr -= ('A' - 'a')
- }
- newstr = append(newstr, chr)
- }
- return string(newstr)
- }
-)
-
-func (s *Section) parseFieldName(raw, actual string) string {
- if len(actual) > 0 {
- return actual
- }
- if s.f.NameMapper != nil {
- return s.f.NameMapper(raw)
- }
- return raw
-}
-
-func parseDelim(actual string) string {
- if len(actual) > 0 {
- return actual
- }
- return ","
-}
-
-var reflectTime = reflect.TypeOf(time.Now()).Kind()
-
-// setSliceWithProperType sets proper values to slice based on its type.
-func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
- var strs []string
- if allowShadow {
- strs = key.StringsWithShadows(delim)
- } else {
- strs = key.Strings(delim)
- }
-
- numVals := len(strs)
- if numVals == 0 {
- return nil
- }
-
- var vals interface{}
- var err error
-
- sliceOf := field.Type().Elem().Kind()
- switch sliceOf {
- case reflect.String:
- vals = strs
- case reflect.Int:
- vals, err = key.parseInts(strs, true, false)
- case reflect.Int64:
- vals, err = key.parseInt64s(strs, true, false)
- case reflect.Uint:
- vals, err = key.parseUints(strs, true, false)
- case reflect.Uint64:
- vals, err = key.parseUint64s(strs, true, false)
- case reflect.Float64:
- vals, err = key.parseFloat64s(strs, true, false)
- case reflectTime:
- vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
- default:
- return fmt.Errorf("unsupported type '[]%s'", sliceOf)
- }
- if err != nil && isStrict {
- return err
- }
-
- slice := reflect.MakeSlice(field.Type(), numVals, numVals)
- for i := 0; i < numVals; i++ {
- switch sliceOf {
- case reflect.String:
- slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
- case reflect.Int:
- slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
- case reflect.Int64:
- slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
- case reflect.Uint:
- slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
- case reflect.Uint64:
- slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
- case reflect.Float64:
- slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
- case reflectTime:
- slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
- }
- }
- field.Set(slice)
- return nil
-}
-
-func wrapStrictError(err error, isStrict bool) error {
- if isStrict {
- return err
- }
- return nil
-}
-
-// setWithProperType sets a proper value to the field based on its type,
-// but it does not return an error on parse failure,
-// because we want to keep the default value that is already assigned to the struct.
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
- switch t.Kind() {
- case reflect.String:
- if len(key.String()) == 0 {
- return nil
- }
- field.SetString(key.String())
- case reflect.Bool:
- boolVal, err := key.Bool()
- if err != nil {
- return wrapStrictError(err, isStrict)
- }
- field.SetBool(boolVal)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- durationVal, err := key.Duration()
- // Skip zero value
- if err == nil && int64(durationVal) > 0 {
- field.Set(reflect.ValueOf(durationVal))
- return nil
- }
-
- intVal, err := key.Int64()
- if err != nil {
- return wrapStrictError(err, isStrict)
- }
- field.SetInt(intVal)
- // byte is an alias for uint8, so supporting uint8 breaks support for byte
- case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- durationVal, err := key.Duration()
- // Skip zero value
- if err == nil && int(durationVal) > 0 {
- field.Set(reflect.ValueOf(durationVal))
- return nil
- }
-
- uintVal, err := key.Uint64()
- if err != nil {
- return wrapStrictError(err, isStrict)
- }
- field.SetUint(uintVal)
-
- case reflect.Float32, reflect.Float64:
- floatVal, err := key.Float64()
- if err != nil {
- return wrapStrictError(err, isStrict)
- }
- field.SetFloat(floatVal)
- case reflectTime:
- timeVal, err := key.Time()
- if err != nil {
- return wrapStrictError(err, isStrict)
- }
- field.Set(reflect.ValueOf(timeVal))
- case reflect.Slice:
- return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
- default:
- return fmt.Errorf("unsupported type '%s'", t)
- }
- return nil
-}
-
-func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) {
- opts := strings.SplitN(tag, ",", 3)
- rawName = opts[0]
- if len(opts) > 1 {
- omitEmpty = opts[1] == "omitempty"
- }
- if len(opts) > 2 {
- allowShadow = opts[2] == "allowshadow"
- }
- return rawName, omitEmpty, allowShadow
-}
-
-func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
- if val.Kind() == reflect.Ptr {
- val = val.Elem()
- }
- typ := val.Type()
-
- for i := 0; i < typ.NumField(); i++ {
- field := val.Field(i)
- tpField := typ.Field(i)
-
- tag := tpField.Tag.Get("ini")
- if tag == "-" {
- continue
- }
-
- rawName, _, allowShadow := parseTagOptions(tag)
- fieldName := s.parseFieldName(tpField.Name, rawName)
- if len(fieldName) == 0 || !field.CanSet() {
- continue
- }
-
- isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
- isStruct := tpField.Type.Kind() == reflect.Struct
- if isAnonymous {
- field.Set(reflect.New(tpField.Type.Elem()))
- }
-
- if isAnonymous || isStruct {
- if sec, err := s.f.GetSection(fieldName); err == nil {
- if err = sec.mapTo(field, isStrict); err != nil {
- return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
- }
- continue
- }
- }
-
- if key, err := s.GetKey(fieldName); err == nil {
- delim := parseDelim(tpField.Tag.Get("delim"))
- if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
- return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
- }
- }
- }
- return nil
-}
-
-// MapTo maps section to given struct.
-func (s *Section) MapTo(v interface{}) error {
- typ := reflect.TypeOf(v)
- val := reflect.ValueOf(v)
- if typ.Kind() == reflect.Ptr {
- typ = typ.Elem()
- val = val.Elem()
- } else {
- return errors.New("cannot map to non-pointer struct")
- }
-
- return s.mapTo(val, false)
-}
-
-// StrictMapTo maps section to given struct in strict mode,
-// which returns all possible error including value parsing error.
-func (s *Section) StrictMapTo(v interface{}) error {
- typ := reflect.TypeOf(v)
- val := reflect.ValueOf(v)
- if typ.Kind() == reflect.Ptr {
- typ = typ.Elem()
- val = val.Elem()
- } else {
- return errors.New("cannot map to non-pointer struct")
- }
-
- return s.mapTo(val, true)
-}
-
-// MapTo maps file to given struct.
-func (f *File) MapTo(v interface{}) error {
- return f.Section("").MapTo(v)
-}
-
-// StrictMapTo maps file to given struct in strict mode,
-// which returns all possible error including value parsing error.
-func (f *File) StrictMapTo(v interface{}) error {
- return f.Section("").StrictMapTo(v)
-}
-
-// MapToWithMapper maps data sources to given struct with name mapper.
-func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
- cfg, err := Load(source, others...)
- if err != nil {
- return err
- }
- cfg.NameMapper = mapper
- return cfg.MapTo(v)
-}
-
-// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
-// which returns all possible error including value parsing error.
-func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
- cfg, err := Load(source, others...)
- if err != nil {
- return err
- }
- cfg.NameMapper = mapper
- return cfg.StrictMapTo(v)
-}
-
-// MapTo maps data sources to given struct.
-func MapTo(v, source interface{}, others ...interface{}) error {
- return MapToWithMapper(v, nil, source, others...)
-}
-
-// StrictMapTo maps data sources to given struct in strict mode,
-// which returns all possible error including value parsing error.
-func StrictMapTo(v, source interface{}, others ...interface{}) error {
- return StrictMapToWithMapper(v, nil, source, others...)
-}
-
-// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
-func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
- slice := field.Slice(0, field.Len())
- if field.Len() == 0 {
- return nil
- }
-
- var buf bytes.Buffer
- sliceOf := field.Type().Elem().Kind()
- for i := 0; i < field.Len(); i++ {
- switch sliceOf {
- case reflect.String:
- buf.WriteString(slice.Index(i).String())
- case reflect.Int, reflect.Int64:
- buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
- case reflect.Uint, reflect.Uint64:
- buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
- case reflect.Float64:
- buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
- case reflectTime:
- buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
- default:
- return fmt.Errorf("unsupported type '[]%s'", sliceOf)
- }
- buf.WriteString(delim)
- }
- key.SetValue(buf.String()[:buf.Len()-1])
- return nil
-}
-
-// reflectWithProperType does the opposite thing as setWithProperType.
-func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
- switch t.Kind() {
- case reflect.String:
- key.SetValue(field.String())
- case reflect.Bool:
- key.SetValue(fmt.Sprint(field.Bool()))
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- key.SetValue(fmt.Sprint(field.Int()))
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- key.SetValue(fmt.Sprint(field.Uint()))
- case reflect.Float32, reflect.Float64:
- key.SetValue(fmt.Sprint(field.Float()))
- case reflectTime:
- key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
- case reflect.Slice:
- return reflectSliceWithProperType(key, field, delim)
- default:
- return fmt.Errorf("unsupported type '%s'", t)
- }
- return nil
-}
-
-// CR: copied from encoding/json/encode.go with modifications for time.Time support.
-// TODO: add more test coverage.
-func isEmptyValue(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- return v.Len() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- case reflectTime:
- t, ok := v.Interface().(time.Time)
- return ok && t.IsZero()
- }
- return false
-}
-
-func (s *Section) reflectFrom(val reflect.Value) error {
- if val.Kind() == reflect.Ptr {
- val = val.Elem()
- }
- typ := val.Type()
-
- for i := 0; i < typ.NumField(); i++ {
- field := val.Field(i)
- tpField := typ.Field(i)
-
- tag := tpField.Tag.Get("ini")
- if tag == "-" {
- continue
- }
-
- opts := strings.SplitN(tag, ",", 2)
- if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
- continue
- }
-
- fieldName := s.parseFieldName(tpField.Name, opts[0])
- if len(fieldName) == 0 || !field.CanSet() {
- continue
- }
-
- if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
- (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
- // Note: The only error here is section doesn't exist.
- sec, err := s.f.GetSection(fieldName)
- if err != nil {
- // Note: fieldName can never be empty here, ignore error.
- sec, _ = s.f.NewSection(fieldName)
- }
-
- // Add comment from comment tag
- if len(sec.Comment) == 0 {
- sec.Comment = tpField.Tag.Get("comment")
- }
-
- if err = sec.reflectFrom(field); err != nil {
- return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
- }
- continue
- }
-
- // Note: Same reason as the section case above.
- key, err := s.GetKey(fieldName)
- if err != nil {
- key, _ = s.NewKey(fieldName, "")
- }
-
- // Add comment from comment tag
- if len(key.Comment) == 0 {
- key.Comment = tpField.Tag.Get("comment")
- }
-
- if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
- return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
- }
-
- }
- return nil
-}
-
-// ReflectFrom reflects section from given struct.
-func (s *Section) ReflectFrom(v interface{}) error {
- typ := reflect.TypeOf(v)
- val := reflect.ValueOf(v)
- if typ.Kind() == reflect.Ptr {
- typ = typ.Elem()
- val = val.Elem()
- } else {
- return errors.New("cannot reflect from non-pointer struct")
- }
-
- return s.reflectFrom(val)
-}
-
-// ReflectFrom reflects file from given struct.
-func (f *File) ReflectFrom(v interface{}) error {
- return f.Section("").ReflectFrom(v)
-}
-
-// ReflectFromWithMapper reflects data sources from given struct with name mapper.
-func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
- cfg.NameMapper = mapper
- return cfg.ReflectFrom(v)
-}
-
-// ReflectFrom reflects data sources from given struct.
-func ReflectFrom(cfg *File, v interface{}) error {
- return ReflectFromWithMapper(cfg, v, nil)
-}
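
The struct mapping above is symmetric: MapTo fills a struct from INI data via `ini` struct tags (with `delim` controlling how slices are split), and ReflectFrom writes a struct back into a file. A sketch under the same assumptions:

```go
package main

import (
	"fmt"

	ini "github.com/go-ini/ini"
)

type Config struct {
	Name  string `ini:"name"`
	Ports []int  `ini:"ports" delim:","`
	Debug bool   `ini:"debug,omitempty"`
}

func main() {
	cfg, err := ini.Load([]byte("name = agent\nports = 80,443\n"))
	if err != nil {
		panic(err)
	}

	var c Config
	if err := cfg.MapTo(&c); err != nil { // a pointer is required
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {Name:agent Ports:[80 443] Debug:false}

	// Round-trip: reflect the struct back into the same file.
	if err := ini.ReflectFrom(cfg, &c); err != nil {
		panic(err)
	}
}
```
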
diff --git a/agent/vendor/github.com/golang/protobuf/AUTHORS b/agent/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 00000000000..15167cd746c
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/agent/vendor/github.com/golang/protobuf/CONTRIBUTORS b/agent/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 00000000000..1c4577e9680
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/agent/vendor/github.com/golang/protobuf/LICENSE b/agent/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 00000000000..0f646931a46
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2010 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/agent/vendor/github.com/golang/protobuf/proto/clone.go b/agent/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 00000000000..3cd3249f706
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,253 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+ in := reflect.ValueOf(src)
+ if in.IsNil() {
+ return src
+ }
+ out := reflect.New(in.Type().Elem())
+ dst := out.Interface().(Message)
+ Merge(dst, src)
+ return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+ // Merge merges src into this message.
+ // Required and optional fields that are set in src will be set to that value in dst.
+ // Elements of repeated fields will be appended.
+ //
+ // Merge may panic if called with a different argument type than the receiver.
+ Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+ XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ if m, ok := dst.(Merger); ok {
+ m.Merge(src)
+ return
+ }
+
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+ }
+ if in.IsNil() {
+ return // Merge from nil src is a noop
+ }
+ if m, ok := dst.(generatedMerger); ok {
+ m.XXX_Merge(src)
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
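
Clone and Merge implement the documented merge semantics: scalar fields set in src overwrite dst, repeated fields are appended, and maps and sub-messages are merged recursively. A non-compiling sketch with a hypothetical generated message type pb.Example (the package path and the Name/Tags fields are illustrative; any protoc-gen-go message behaves the same):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "example.com/hypothetical/pb" // hypothetical generated package
)

func main() {
	dst := &pb.Example{Name: "a", Tags: []string{"x"}}
	src := &pb.Example{Tags: []string{"y"}}

	proto.Merge(dst, src) // Tags becomes [x y]; Name "a" is left alone

	clone := proto.Clone(dst) // deep copy, implemented as Merge into a fresh message
	fmt.Println(clone)
}
```
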
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/agent/vendor/github.com/golang/protobuf/proto/decode.go b/agent/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 00000000000..d9aa3c42d66
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,428 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
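
DecodeVarint implements base-128 varints: each byte carries seven payload bits, lowest group first, and the high bit marks continuation. For example, 300 encodes as 0xAC 0x02 (0xAC contributes the low bits 0b0101100 = 44; 0x02 contributes 2<<7 = 256). A quick check against the package-level function above:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	x, n := proto.DecodeVarint([]byte{0xAC, 0x02})
	fmt.Println(x, n) // 300 2
}
```
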
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ i := p.index
+ buf := p.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ p.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return p.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
+ x = uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+ // x -= 0x80 << 63 // Always zero.
+
+ return 0, errOverflow
+
+done:
+ p.index = i
+ return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // todo: check if can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+ XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto have unfortunately been inconsistent
+ // whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ b := p.buf[p.index:]
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return io.ErrUnexpectedEOF
+ }
+ err := Unmarshal(b[:x], pb)
+ p.index += y
+ return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(newUnmarshaler); ok {
+ err := u.XXX_Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto have unfortunately been inconsistent
+ // whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ // Slow workaround for messages that aren't Unmarshalers.
+ // This includes some hand-coded .pb.go files and
+ // bootstrap protos.
+ // TODO: fix all of those and then add Unmarshal to
+ // the Message interface. Then:
+ // The cast above and code below can be deleted.
+ // The old unmarshaler can be deleted.
+ // Clients can call Unmarshal directly (can already do that, actually).
+ var info InternalMessageInfo
+ err := info.Unmarshal(pb, p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+}
diff --git a/agent/vendor/github.com/golang/protobuf/proto/discard.go b/agent/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 00000000000..dea2617ced3
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type generatedDiscarder interface {
+ XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+ if m, ok := m.(generatedDiscarder); ok {
+ m.XXX_DiscardUnknown()
+ return
+ }
+ // TODO: Dynamically populate a InternalMessageInfo for legacy messages,
+ // but the master branch has no implementation for InternalMessageInfo,
+ // so it would be more work to replicate that approach.
+ discardLegacy(m)
+}
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+ di := atomicLoadDiscardInfo(&a.discard)
+ if di == nil {
+ di = getDiscardInfo(reflect.TypeOf(m).Elem())
+ atomicStoreDiscardInfo(&a.discard, di)
+ }
+ di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []discardFieldInfo
+ unrecognized field
+}
+
+type discardFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+ discard func(src pointer)
+}
+
+var (
+ discardInfoMap = map[reflect.Type]*discardInfo{}
+ discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+ discardInfoLock.Lock()
+ defer discardInfoLock.Unlock()
+ di := discardInfoMap[t]
+ if di == nil {
+ di = &discardInfo{typ: t}
+ discardInfoMap[t] = di
+ }
+ return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&di.initialized) == 0 {
+ di.computeDiscardInfo()
+ }
+
+ for _, fi := range di.fields {
+ sfp := src.offset(fi.field)
+ fi.discard(sfp)
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+ // Ignore lock since DiscardUnknown is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ DiscardUnknown(m)
+ }
+ }
+ }
+
+ if di.unrecognized.IsValid() {
+ *src.offset(di.unrecognized).toBytes() = nil
+ }
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+ di.lock.Lock()
+ defer di.lock.Unlock()
+ if di.initialized != 0 {
+ return
+ }
+ t := di.typ
+ n := t.NumField()
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ dfi := discardFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+ case isSlice: // E.g., []*pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sps := src.getPointerSlice()
+ for _, sp := range sps {
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ default: // E.g., *pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+ default: // E.g., map[K]V
+ if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+ dfi.discard = func(src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ DiscardUnknown(val.Interface().(Message))
+ }
+ }
+ } else {
+ dfi.discard = func(pointer) {} // Noop
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ dfi.discard = func(src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ DiscardUnknown(sv.Interface().(Message))
+ }
+ }
+ }
+ }
+ default:
+ continue
+ }
+ di.fields = append(di.fields, dfi)
+ }
+
+ di.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ di.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+ v := reflect.ValueOf(m)
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return
+ }
+ t := v.Type()
+
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ vf := v.Field(i)
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+ case isSlice: // E.g., []*pb.T
+ for j := 0; j < vf.Len(); j++ {
+ discardLegacy(vf.Index(j).Interface().(Message))
+ }
+ default: // E.g., *pb.T
+ discardLegacy(vf.Interface().(Message))
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+ default: // E.g., map[K]V
+ tv := vf.Type().Elem()
+ if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+ for _, key := range vf.MapKeys() {
+ val := vf.MapIndex(key)
+ discardLegacy(val.Interface().(Message))
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
+ default: // E.g., test_proto.isCommunique_Union interface
+ if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+ vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+ if !vf.IsNil() {
+ vf = vf.Elem() // E.g., test_proto.Communique_Msg
+ vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+ if vf.Kind() == reflect.Ptr {
+ discardLegacy(vf.Interface().(Message))
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+ if vf.Type() != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ vf.Set(reflect.ValueOf([]byte(nil)))
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(m); err == nil {
+ // Ignore lock since discardLegacy is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ discardLegacy(m)
+ }
+ }
+ }
+}
diff --git a/agent/vendor/github.com/golang/protobuf/proto/encode.go b/agent/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 00000000000..3abfed2cff0
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,203 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "reflect"
+)
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // errOneofHasNil is the error returned if Marshal is called with
+ // a struct with a oneof field containing a nil element.
+ errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // ErrTooLarge is the error returned if Marshal is called with a
+ // message that encodes to >2GB.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ switch {
+ case x < 1<<7:
+ return 1
+ case x < 1<<14:
+ return 2
+ case x < 1<<21:
+ return 3
+ case x < 1<<28:
+ return 4
+ case x < 1<<35:
+ return 5
+ case x < 1<<42:
+ return 6
+ case x < 1<<49:
+ return 7
+ case x < 1<<56:
+ return 8
+ case x < 1<<63:
+ return 9
+ }
+ return 10
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ siz := Size(pb)
+ p.EncodeVarint(uint64(siz))
+ return p.Marshal(pb)
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
diff --git a/agent/vendor/github.com/golang/protobuf/proto/equal.go b/agent/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 00000000000..d4db5a1c145
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal. Note a "bytes" field,
+ although represented by []byte, is not a repeated field and the
+ rule for the scalar fields described above applies.
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Two map fields are equal iff their lengths are the same,
+ and they contain the same set of elements. Zero-length map
+ fields are equal.
+ - Every other combination of things are not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_InternalExtensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ return bytes.Equal(u1, u2)
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ // Maps may have nil values in them, so check for nil.
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+ em1, _ := x1.extensionsRead()
+ em2, _ := x2.extensionsRead()
+ return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 == nil && m2 == nil {
+ // Both have only encoded form.
+ if bytes.Equal(e1.enc, e2.enc) {
+ continue
+ }
+ // The bytes are different, but the extensions might still be
+ // equal. We need to decode them to compare.
+ }
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ // If both have only encoded form and the bytes are the same,
+ // it is handled above. We get here when the bytes are different.
+ // We don't know how to decode it, so just compare them as byte
+ // slices.
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ return false
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/agent/vendor/github.com/golang/protobuf/proto/extensions.go b/agent/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 00000000000..816a3b9d6c0
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,543 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ extensionsWrite() map[int32]Extension
+ extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+ extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+ return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+ return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock() {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, error) {
+ switch p := p.(type) {
+ case extendableProto:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return p, nil
+ case extendableProtoV1:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return extensionAdapter{p}, nil
+ }
+ // Don't allocate a specific error containing %T:
+ // this is the hot path for Clone and MarshalText.
+ return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+ v := reflect.ValueOf(x)
+ return v.Kind() == reflect.Ptr && v.IsNil()
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+ // The struct must be indirect so that if a user inadvertently copies a
+ // generated message and its embedded XXX_InternalExtensions, they
+ // avoid the mayhem of a copied mutex.
+ //
+ // The mutex serializes all logically read-only operations to p.extensionMap.
+ // It is up to the client to ensure that write operations to p.extensionMap are
+ // mutually exclusive with other accesses.
+ p *struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+ if e.p == nil {
+ e.p = new(struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ })
+ e.p.extensionMap = make(map[int32]Extension)
+ }
+ return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use. It may be nil.
+// The caller must hold the returned mutex's lock when accessing Elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+ if e.p == nil {
+ return nil, nil
+ }
+ return e.p.extensionMap, &e.p.mu
+}
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+ Filename string // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+ epb, err := extendable(base)
+ if err != nil {
+ return
+ }
+ extmap := epb.extensionsWrite()
+ extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ var pbi interface{} = pb
+ // Check the extended type.
+ if ea, ok := pbi.(extensionAdapter); ok {
+ pbi = ea.extendableProtoV1
+ }
+ if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+ return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ epb, err := extendable(pb)
+ if err != nil {
+ return false
+ }
+ extmap, mu := epb.extensionsRead()
+ if extmap == nil {
+ return false
+ }
+ mu.Lock()
+ _, ok := extmap[extension.Field]
+ mu.Unlock()
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return
+ }
+ // TODO: Check types, field numbers, etc.?
+ extmap := epb.extensionsWrite()
+ delete(extmap, extension.Field)
+}
+
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+
+ if extension.ExtendedType != nil {
+ // can only check type if this is a complete descriptor
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return nil, err
+ }
+ }
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return defaultExtensionValue(extension)
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ if extension.ExtensionType == nil {
+ // incomplete descriptor
+ return e.enc, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ if extension.ExtensionType == nil {
+ // incomplete descriptor, so no default
+ return nil, ErrMissingExtension
+ }
+
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr, we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non int32 reflect.value directly
+ // set it as a int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ unmarshal := typeUnmarshaler(t, extension.Tag)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate space to store the pointer/slice.
+ value := reflect.New(t).Elem()
+
+ var err error
+ for {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ wire := int(x) & 7
+
+ b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(b) == 0 {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+ registeredExtensions := RegisteredExtensions(pb)
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return nil, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ extensions := make([]*ExtensionDesc, 0, len(emap))
+ for extid, e := range emap {
+ desc := e.desc
+ if desc == nil {
+ desc = registeredExtensions[extid]
+ if desc == nil {
+ desc = &ExtensionDesc{Field: extid}
+ }
+ }
+
+ extensions = append(extensions, desc)
+ }
+ return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+ epb, err := extendable(pb)
+ if err != nil {
+ return err
+ }
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ extmap := epb.extensionsWrite()
+ extmap[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return
+ }
+ m := epb.extensionsWrite()
+ for k := range m {
+ delete(m, k)
+ }
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/agent/vendor/github.com/golang/protobuf/proto/lib.go b/agent/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 00000000000..75565cc6dcf
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,979 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and a Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+	// Use a type switch to determine which oneof was set.
+	switch u := test.Union.(type) {
+	case *pb.Test_Number:
+		log.Println("number:", u.Number)
+	case *pb.Test_Name:
+		log.Println("name:", u.Name)
+	}
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+ if e.field == "" {
+		return "proto: required field not set"
+ }
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
+}
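+
+// Illustrative sketch (not part of the upstream sources): Marshal keeps
+// encoding past an unset required field and returns the partial encoding
+// together with a *RequiredNotSetError, so tolerant callers can treat the
+// error as non-fatal:
+//
+//	data, err := proto.Marshal(msg)
+//	if _, nonFatal := err.(*proto.RequiredNotSetError); err != nil && !nonFatal {
+//		return err // fatal error
+//	}
+//	// data holds the encoding; a required field may simply be unset.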
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+ if e.field == "" {
+ return "proto: invalid UTF-8 detected"
+ }
+ return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+ return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+ if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+ return true
+ }
+ if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+ return true
+ }
+ return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether it was absorbed.
+// Nil and non-fatal errors are absorbed and return true; fatal errors
+// return false. Only the first non-fatal error is retained in nf.E.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+ if err == nil {
+ return true // not an error
+ }
+ if !isNonFatal(err) {
+ return false // fatal error
+ }
+ if nf.E == nil {
+ nf.E = err // store first instance of non-fatal error
+ }
+ return true
+}
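+
+// Usage sketch (assumption: mirrors how the table-driven encoder drives
+// nonFatal; fields and encodeField are hypothetical):
+//
+//	var nf nonFatal
+//	for _, f := range fields {
+//		if err := encodeField(f); !nf.Merge(err) {
+//			return err // fatal: abort immediately
+//		}
+//	}
+//	return nf.E // nil, or the first non-fatal error observed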
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // read point
+
+ deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by key in lexicographic order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+ p.deterministic = deterministic
+}
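+
+// Example (illustrative sketch; Buffer.Marshal is defined in this package's
+// encode.go): deterministic output is requested through a Buffer rather than
+// the package-level Marshal function:
+//
+//	var b proto.Buffer
+//	b.SetDeterministic(true)
+//	if err := b.Marshal(msg); err != nil {
+//		return err
+//	}
+//	out := b.Bytes()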
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum's value-to-name map and a value, it returns the
+// symbolic name, falling back to the decimal value when the name is unknown.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, a byte buffer containing the JSON-encoded value,
+// and the enum's name (used in error messages), it returns an int32 that can
+// be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
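+
+// Example (illustrative sketch, reusing the FOO maps from the package
+// documentation above): both JSON encodings decode to the same value:
+//
+//	v, err := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // v == 17
+//	v, err = proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")   // v == 17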
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
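+
+// Example (illustrative sketch, reusing the generated Test message from the
+// package documentation, whose "type" field declares a default of 77):
+//
+//	m := &pb.Test{Label: proto.String("x")}
+//	proto.SetDefaults(m) // m.Type now points to 77; set fields are untouched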
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+	// defaults maps a protocol buffer struct type to its defaultMessage,
+	// which records the scalar fields with proto-declared defaults and
+	// the indices of nested message fields.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
+// Map fields may have key types of non-float scalars, strings and enums.
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{vs: vs}
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ case reflect.Bool:
+ s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+ case reflect.String:
+ s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+ default:
+ panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
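+
+// Usage sketch (assumption: mirrors the deterministic-marshaling code path):
+//
+//	keys := mv.MapKeys()     // mv is a reflect.Value of map kind
+//	sort.Sort(mapKeys(keys)) // keys are now in a deterministic order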
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+ marshal *marshalInfo
+ unmarshal *unmarshalInfo
+ merge *mergeInfo
+ discard *discardInfo
+}
diff --git a/agent/vendor/github.com/golang/protobuf/proto/message_set.go b/agent/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 00000000000..3b6ca41d5e5
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,314 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "sync"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by exts in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+ return marshalMessageSet(exts, false)
+}
+
+// marshalMessageSet implements the function above, with an option to toggle deterministic output during Marshal.
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ var u marshalInfo
+ siz := u.sizeMessageSet(exts)
+ b := make([]byte, 0, siz)
+ return u.appendMessageSet(b, exts, deterministic)
+
+ case map[int32]Extension:
+ // This is an old-style extension map.
+ // Wrap it in a new-style XXX_InternalExtensions.
+ ie := XXX_InternalExtensions{
+ p: &struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }{
+ extensionMap: exts,
+ },
+ }
+
+ var u marshalInfo
+ siz := u.sizeMessageSet(&ie)
+ b := make([]byte, 0, siz)
+ return u.appendMessageSet(b, &ie, deterministic)
+
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m = exts.extensionsWrite()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return errors.New("proto: not an extension map")
+ }
+
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by exts in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ var mu sync.Locker
+ m, mu = exts.extensionsRead()
+ if m != nil {
+ // Keep the extensions map locked until we're done marshaling to prevent
+ // races between marshaling and unmarshaling the lazily-{en,de}coded
+ // values.
+ mu.Lock()
+ defer mu.Unlock()
+ }
+ case map[int32]Extension:
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ for i, id := range ids {
+ ext := m[id]
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+
+ if i > 0 && b.Len() > 1 {
+ b.WriteByte(',')
+ }
+
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
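+
+// Usage sketch (illustrative; in practice protoc-gen-go emits this call, and
+// MyExtension is a hypothetical generated message type):
+//
+//	func init() {
+//		proto.RegisterMessageSetType((*MyExtension)(nil), 12345, "my.package.MyExtension")
+//	}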
diff --git a/agent/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/agent/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 00000000000..b6cad90834b
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,357 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "reflect"
+ "sync"
+)
+
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+ v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+ v := reflect.ValueOf(*i)
+ u := reflect.New(v.Type())
+ u.Elem().Set(v)
+ return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+func (p pointer) isNil() bool {
+ return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+ n, m := s.Len(), s.Cap()
+ if n < m {
+ s.SetLen(n + 1)
+ } else {
+ s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+ }
+ return s.Index(n)
+}
+
+func (p pointer) toInt64() *int64 {
+ return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+ return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+ return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+ return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return p.v.Interface().(**int32)
+}
+ func (p pointer) toInt32Slice() *[]int32 {
+ return p.v.Interface().(*[]int32)
+}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().(*int32)
+ }
+ // an enum
+ return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ // Allocate value in a *int32. Possibly convert that to a *enum.
+ // Then assign it to a **int32 or **enum.
+ // Note: we can convert *int32 to *enum, but we can't convert
+ // **int32 to **enum!
+ p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().([]int32)
+ }
+ // an enum
+ // Allocate a []int32, then assign []enum's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := p.v.Elem()
+ s := make([]int32, slice.Len())
+ for i := 0; i < slice.Len(); i++ {
+ s[i] = int32(slice.Index(i).Int())
+ }
+ return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ p.v.Elem().Set(reflect.ValueOf(v))
+ return
+ }
+ // an enum
+ // Allocate a []enum, then assign []int32's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+ for i, x := range v {
+ slice.Index(i).SetInt(int64(x))
+ }
+ p.v.Elem().Set(slice)
+}
+func (p pointer) appendInt32Slice(v int32) {
+ grow(p.v.Elem()).SetInt(int64(v))
+}
+
+func (p pointer) toUint64() *uint64 {
+ return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+ return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+ return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+ return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+ return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+ return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+ return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+ return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+ return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+ return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+ return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+ return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+ return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+ return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+ return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+ p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+ grow(p.v.Elem()).Set(q.v)
+}
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+ if p.v.IsNil() {
+ return nil
+ }
+ n := p.v.Elem().Len()
+ s := make([]pointer, n)
+ for i := 0; i < n; i++ {
+ s[i] = pointer{v: p.v.Elem().Index(i)}
+ }
+ return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ if v == nil {
+ p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+ return
+ }
+ s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+ for _, p := range v {
+ s = reflect.Append(s, p.v)
+ }
+ p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed to by p.
+func (p pointer) getInterfacePointer() pointer {
+ if p.v.Elem().IsNil() {
+ return pointer{v: p.v.Elem()}
+ }
+ return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ // TODO: check that p.v.Type().Elem() == t?
+ return p.v
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+
+var atomicLock sync.Mutex
diff --git a/agent/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/agent/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 00000000000..d55a335d945
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,308 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "sync/atomic"
+ "unsafe"
+)
+
+const unsafeAllowed = true
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != invalidField
+}
+
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+ p unsafe.Pointer
+}
+
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ // Saves ~25ns over the equivalent:
+ // return valToPointer(reflect.ValueOf(*i))
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+ // Super-tricky - read or get the address of data word of interface value.
+ if isptr {
+ // The interface is of pointer type, thus it is a direct interface.
+ // The data word is the pointer data itself. We take its address.
+ return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ }
+ // The interface is not of pointer type. The data word is the pointer
+ // to the data.
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ // For safety, we should panic if !f.IsValid, however calling panic causes
+ // this to no longer be inlineable, which is a serious performance cost.
+ /*
+ if !f.IsValid() {
+ panic("invalid field")
+ }
+ */
+ return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+func (p pointer) isNil() bool {
+ return p.p == nil
+}
+
+func (p pointer) toInt64() *int64 {
+ return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+ return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+ return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+ return (*int32)(p.p)
+}
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return (**int32)(p.p)
+ }
+ func (p pointer) toInt32Slice() *[]int32 {
+ return (*[]int32)(p.p)
+ }
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ *(**int32)(p.p) = &v
+}
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+ return *(*[]int32)(p.p)
+}
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+ *(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+ s := (*[]int32)(p.p)
+ *s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64 {
+ return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+ return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+ return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+ return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+ return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+ return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+ return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+ return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+ return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+ return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+ return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+ return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+ return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+ return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We load it as []pointer.
+ return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We store it as []pointer.
+ *(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+ return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+ *(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+ s := (*[]unsafe.Pointer)(p.p)
+ *s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed to by p.
+func (p pointer) getInterfacePointer() pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/agent/vendor/github.com/golang/protobuf/proto/properties.go b/agent/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 00000000000..50b99b83a8c
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,544 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for describing the properties of protocol buffer struct fields.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
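+
+// Usage sketch (internal; tags below tagMapFastLimit take the slice fast path):
+//
+//	var tm tagMap
+//	tm.put(3, 0)                 // proto tag 3 -> struct field index 0
+//	if fi, ok := tm.get(3); ok { // slice lookup, no map access
+//		_ = fi
+//	}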
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+
+ mtype reflect.Type // set for map types only
+ MapKeyProp *Properties // set for map types only
+ MapValProp *Properties // set for map types only
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s += ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ case "zigzag64":
+ p.WireType = WireVarint
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+outer:
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break outer
+ }
+ }
+ }
+}
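+
+// For example, parsing the hypothetical tag "bytes,5,opt,name=greeting,def=hello, world"
+// yields Wire "bytes", Tag 5, Optional true, OrigName "greeting", and
+// Default "hello, world": since commas are not escaped and def is always
+// last, everything after "def=" is rejoined.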
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ switch t1 := typ; t1.Kind() {
+ case reflect.Ptr:
+ if t1.Elem().Kind() == reflect.Struct {
+ p.stype = t1.Elem()
+ }
+
+ case reflect.Slice:
+ if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
+ p.stype = t2.Elem()
+ }
+
+ case reflect.Map:
+ p.mtype = t1
+ p.MapKeyProp = &Properties{}
+ p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.MapValProp = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+)
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setFieldProps(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ oneof := f.Tag.Get("protobuf_oneof") // special case
+ if oneof != "" {
+ // Oneof fields don't use the traditional protobuf tag.
+ p.OrigName = oneof
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ _, _, _, oots = om.XXX_OneofFuncs()
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
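+
+// Generated code typically registers an enum along these lines (the names
+// are hypothetical):
+//
+//	proto.RegisterEnum("pkg.Color", Color_name, Color_value)
+//
+// after which EnumValueMap("pkg.Color") returns Color_value.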
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
+ protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypedNils[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+ // Generated code always calls RegisterType with nil x.
+ // This check is just for extra safety.
+ protoTypedNils[name] = x
+ } else {
+ protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+ }
+ revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+ if reflect.TypeOf(x).Kind() != reflect.Map {
+ panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+ }
+ if _, ok := protoMapTypes[name]; ok {
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoMapTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+ type xname interface {
+ XXX_MessageName() string
+ }
+ if m, ok := x.(xname); ok {
+ return m.XXX_MessageName()
+ }
+ return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+ if t, ok := protoTypedNils[name]; ok {
+ return reflect.TypeOf(t)
+ }
+ return protoMapTypes[name]
+}
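+
+// These registries make MessageName and MessageType inverses for registered
+// messages: after generated code has run RegisterType((*Foo)(nil), "pkg.Foo")
+// (a hypothetical message), MessageName((*Foo)(nil)) returns "pkg.Foo" and
+// MessageType("pkg.Foo") returns the *Foo type.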
+
+// A registry of all linked proto files.
+var (
+ protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+ protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/agent/vendor/github.com/golang/protobuf/proto/table_marshal.go b/agent/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 00000000000..b16794496f5
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2767 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// A sizer takes a pointer to a field and the size of its tag, and computes
+// the size of the field's encoded data.
+type sizer func(pointer, int) int
+
+// A marshaler takes a byte slice, a pointer to a field, and its tag (in wire
+// format); it marshals the field to the end of the slice and returns the
+// resulting slice and any error.
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+ typ reflect.Type
+ fields []*marshalFieldInfo
+ unrecognized field // offset of XXX_unrecognized
+ extensions field // offset of XXX_InternalExtensions
+ v1extensions field // offset of XXX_extensions
+ sizecache field // offset of XXX_sizecache
+ initialized int32 // 0 -- only typ is set, 1 -- fully initialized
+ messageset bool // uses message set wire format
+ hasmarshaler bool // has custom marshaler
+ sync.RWMutex // protect extElems map, also for initialization
+ extElems map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+ field field
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isPointer bool
+ required bool // field is required
+ name string // name of the field, for error reporting
+ oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isptr bool // elem is pointer typed, so an interface of this type is a direct interface (extension only)
+}
+
+var (
+ marshalInfoMap = map[reflect.Type]*marshalInfo{}
+ marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+ marshalInfoLock.Lock()
+ u, ok := marshalInfoMap[t]
+ if !ok {
+ u = &marshalInfo{typ: t}
+ marshalInfoMap[t] = u
+ }
+ marshalInfoLock.Unlock()
+ return u
+}
+
+// Size is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return 0
+ }
+ return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return b, ErrNil
+ }
+ return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+ // u := a.marshal, but atomically.
+ // We use an atomic here to ensure memory consistency.
+ u := atomicLoadMarshalInfo(&a.marshal)
+ if u == nil {
+ // Get marshal information from type of message.
+ t := reflect.ValueOf(msg).Type()
+ if t.Kind() != reflect.Ptr {
+ panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+ }
+ u = getMarshalInfo(t.Elem())
+ // Store it in the cache for later users.
+ // a.marshal = u, but atomically.
+ atomicStoreMarshalInfo(&a.marshal, u)
+ }
+ return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ n := 0
+ for _, f := range u.fields {
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ n += f.sizer(ptr.offset(f.field), f.tagsize)
+ }
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ n += u.sizeMessageSet(e)
+ } else {
+ n += u.sizeExtensions(e)
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ n += u.sizeV1Extensions(m)
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ n += len(s)
+ }
+ // cache the result for use in marshal
+ if u.sizecache.IsValid() {
+ atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+ }
+ return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the
+// message is not generated), fall back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+ if u.sizecache.IsValid() {
+ return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+ }
+ return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, maps are marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b1, err := m.Marshal()
+ b = append(b, b1...)
+ return b, err
+ }
+
+ var err, errLater error
+ // The old marshaler encodes extensions at the beginning.
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ b, err = u.appendMessageSet(b, e, deterministic)
+ } else {
+ b, err = u.appendExtensions(b, e, deterministic)
+ }
+ if err != nil {
+ return b, err
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ b, err = u.appendV1Extensions(b, m, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ for _, f := range u.fields {
+ if f.required {
+ if ptr.offset(f.field).getPointer().isNil() {
+ // Required field is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name}
+ }
+ continue
+ }
+ }
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+ if err != nil {
+ if err1, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name + "." + err1.field}
+ }
+ continue
+ }
+ if err == errRepeatedHasNil {
+ err = errors.New("proto: repeated field " + f.name + " has nil element")
+ }
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
+ return b, err
+ }
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ b = append(b, s...)
+ }
+ return b, errLater
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+ u.Lock()
+ defer u.Unlock()
+ if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+ return
+ }
+
+ t := u.typ
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.v1extensions = invalidField
+ u.sizecache = invalidField
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ u.hasmarshaler = true
+ atomic.StoreInt32(&u.initialized, 1)
+ return
+ }
+
+ // get oneof implementers
+ var oneofImplementers []interface{}
+ if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ }
+
+ n := t.NumField()
+
+ // deal with XXX fields first
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if !strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ switch f.Name {
+ case "XXX_sizecache":
+ u.sizecache = toField(&f)
+ case "XXX_unrecognized":
+ u.unrecognized = toField(&f)
+ case "XXX_InternalExtensions":
+ u.extensions = toField(&f)
+ u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+ case "XXX_extensions":
+ u.v1extensions = toField(&f)
+ case "XXX_NoUnkeyedLiteral":
+ // nothing to do
+ default:
+ panic("unknown XXX field: " + f.Name)
+ }
+ n--
+ }
+
+ // normal fields
+ fields := make([]marshalFieldInfo, n) // batch allocation
+ u.fields = make([]*marshalFieldInfo, 0, n)
+ for i, j := 0, 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ field := &fields[j]
+ j++
+ field.name = f.Name
+ u.fields = append(u.fields, field)
+ if f.Tag.Get("protobuf_oneof") != "" {
+ field.computeOneofFieldInfo(&f, oneofImplementers)
+ continue
+ }
+ if f.Tag.Get("protobuf") == "" {
+ // field has no tag (not in generated message), ignore it
+ u.fields = u.fields[:len(u.fields)-1]
+ j--
+ continue
+ }
+ field.computeMarshalFieldInfo(&f)
+ }
+
+ // fields are marshaled in tag order on the wire.
+ sort.Sort(byTag(u.fields))
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int { return len(a) }
+func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+ // get from cache first
+ u.RLock()
+ e, ok := u.extElems[desc.Field]
+ u.RUnlock()
+ if ok {
+ return e
+ }
+
+ t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+ tags := strings.Split(desc.Tag, ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizer, marshaler := typeMarshaler(t, tags, false, false)
+ e = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ isptr: t.Kind() == reflect.Ptr,
+ }
+
+ // update cache
+ u.Lock()
+ if u.extElems == nil {
+ u.extElems = make(map[int32]*marshalElemInfo)
+ }
+ u.extElems[desc.Field] = e
+ u.Unlock()
+ return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+ // Parse the protobuf tag of the field.
+ // The tag has the format "bytes,49,opt,name=foo,def=hello!".
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ if tags[0] == "" {
+ return
+ }
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ if tags[2] == "req" {
+ fi.required = true
+ }
+ fi.setTag(f, tag, wt)
+ fi.setMarshaler(f, tags)
+}
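+
+// For example, a field tagged `protobuf:"bytes,49,opt,name=foo"` parses to
+// tag 49 with wire type WireBytes, so fi.wiretag becomes 49<<3|2 = 394 and
+// fi.tagsize is SizeVarint(49<<3) = 2.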
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+ fi.field = toField(f)
+ fi.wiretag = 1<<31 - 1 // Use a large tag number so oneofs sort at the end. This tag will not appear on the wire.
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+ fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+ ityp := f.Type // interface type
+ for _, o := range oneofImplementers {
+ t := reflect.TypeOf(o)
+ if !t.Implements(ityp) {
+ continue
+ }
+ sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+ tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+ fi.oneofElems[t.Elem()] = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ }
+ }
+}
+
+type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+ switch encoding {
+ case "fixed32":
+ return WireFixed32
+ case "fixed64":
+ return WireFixed64
+ case "varint", "zigzag32", "zigzag64":
+ return WireVarint
+ case "bytes":
+ return WireBytes
+ case "group":
+ return WireStartGroup
+ }
+ panic("unknown wire type " + encoding)
+}
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+ fi.field = toField(f)
+ fi.wiretag = uint64(tag)<<3 | wt
+ fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+ switch f.Type.Kind() {
+ case reflect.Map:
+ // map field
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeMapMarshaler(f)
+ return
+ case reflect.Ptr, reflect.Slice:
+ fi.isPointer = true
+ }
+ fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+ encoding := tags[0]
+
+ pointer := false
+ slice := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ packed := false
+ proto3 := false
+ validateUTF8 := true
+ for i := 2; i < len(tags); i++ {
+ if tags[i] == "packed" {
+ packed = true
+ }
+ if tags[i] == "proto3" {
+ proto3 = true
+ }
+ }
+ validateUTF8 = validateUTF8 && proto3
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return sizeBoolPtr, appendBoolPtr
+ }
+ if slice {
+ if packed {
+ return sizeBoolPackedSlice, appendBoolPackedSlice
+ }
+ return sizeBoolSlice, appendBoolSlice
+ }
+ if nozero {
+ return sizeBoolValueNoZero, appendBoolValueNoZero
+ }
+ return sizeBoolValue, appendBoolValue
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixed32Ptr, appendFixed32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed32PackedSlice, appendFixed32PackedSlice
+ }
+ return sizeFixed32Slice, appendFixed32Slice
+ }
+ if nozero {
+ return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+ }
+ return sizeFixed32Value, appendFixed32Value
+ case "varint":
+ if pointer {
+ return sizeVarint32Ptr, appendVarint32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint32PackedSlice, appendVarint32PackedSlice
+ }
+ return sizeVarint32Slice, appendVarint32Slice
+ }
+ if nozero {
+ return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+ }
+ return sizeVarint32Value, appendVarint32Value
+ }
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixedS32Ptr, appendFixedS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+ }
+ return sizeFixedS32Slice, appendFixedS32Slice
+ }
+ if nozero {
+ return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+ }
+ return sizeFixedS32Value, appendFixedS32Value
+ case "varint":
+ if pointer {
+ return sizeVarintS32Ptr, appendVarintS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+ }
+ return sizeVarintS32Slice, appendVarintS32Slice
+ }
+ if nozero {
+ return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+ }
+ return sizeVarintS32Value, appendVarintS32Value
+ case "zigzag32":
+ if pointer {
+ return sizeZigzag32Ptr, appendZigzag32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+ }
+ return sizeZigzag32Slice, appendZigzag32Slice
+ }
+ if nozero {
+ return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+ }
+ return sizeZigzag32Value, appendZigzag32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixed64Ptr, appendFixed64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed64PackedSlice, appendFixed64PackedSlice
+ }
+ return sizeFixed64Slice, appendFixed64Slice
+ }
+ if nozero {
+ return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+ }
+ return sizeFixed64Value, appendFixed64Value
+ case "varint":
+ if pointer {
+ return sizeVarint64Ptr, appendVarint64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint64PackedSlice, appendVarint64PackedSlice
+ }
+ return sizeVarint64Slice, appendVarint64Slice
+ }
+ if nozero {
+ return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+ }
+ return sizeVarint64Value, appendVarint64Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixedS64Ptr, appendFixedS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+ }
+ return sizeFixedS64Slice, appendFixedS64Slice
+ }
+ if nozero {
+ return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+ }
+ return sizeFixedS64Value, appendFixedS64Value
+ case "varint":
+ if pointer {
+ return sizeVarintS64Ptr, appendVarintS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+ }
+ return sizeVarintS64Slice, appendVarintS64Slice
+ }
+ if nozero {
+ return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+ }
+ return sizeVarintS64Value, appendVarintS64Value
+ case "zigzag64":
+ if pointer {
+ return sizeZigzag64Ptr, appendZigzag64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+ }
+ return sizeZigzag64Slice, appendZigzag64Slice
+ }
+ if nozero {
+ return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+ }
+ return sizeZigzag64Value, appendZigzag64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return sizeFloat32Ptr, appendFloat32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat32PackedSlice, appendFloat32PackedSlice
+ }
+ return sizeFloat32Slice, appendFloat32Slice
+ }
+ if nozero {
+ return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+ }
+ return sizeFloat32Value, appendFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return sizeFloat64Ptr, appendFloat64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat64PackedSlice, appendFloat64PackedSlice
+ }
+ return sizeFloat64Slice, appendFloat64Slice
+ }
+ if nozero {
+ return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+ }
+ return sizeFloat64Value, appendFloat64Value
+ case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return sizeStringPtr, appendUTF8StringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendUTF8StringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendUTF8StringValueNoZero
+ }
+ return sizeStringValue, appendUTF8StringValue
+ }
+ if pointer {
+ return sizeStringPtr, appendStringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendStringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendStringValueNoZero
+ }
+ return sizeStringValue, appendStringValue
+ case reflect.Slice:
+ if slice {
+ return sizeBytesSlice, appendBytesSlice
+ }
+ if oneof {
+ // A oneof bytes field may also carry the "proto3" tag.
+ // We want to marshal it as a oneof field, so do this
+ // check before the proto3 check.
+ return sizeBytesOneof, appendBytesOneof
+ }
+ if proto3 {
+ return sizeBytes3, appendBytes3
+ }
+ return sizeBytes, appendBytes
+ case reflect.Struct:
+ switch encoding {
+ case "group":
+ if slice {
+ return makeGroupSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeGroupMarshaler(getMarshalInfo(t))
+ case "bytes":
+ if slice {
+ return makeMessageSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeMessageMarshaler(getMarshalInfo(t))
+ }
+ }
+ panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
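+
+// For example, a packed uint32 slice [1, 300] has a payload of
+// SizeVarint(1)+SizeVarint(300) = 1+2 = 3 bytes, so the field totals
+// 3 + SizeVarint(3) + tagsize bytes: the payload, its length prefix,
+// and the key.
+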
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v) + tagsize
+ }
+ return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+ }
+ return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
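+
+// Zigzag encoding maps signed values of small magnitude to small unsigned
+// varints: (v<<1)^(v>>31) sends 0 to 0, -1 to 1, 1 to 2, and -2 to 3, so -1
+// costs a single payload byte here, where a plain varint would spend ten
+// bytes on it.
+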
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+ }
+ return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeBoolValue(_ pointer, tagsize int) int {
+ return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toBool()
+ if !v {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return 0
+ }
+ return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ if v == "" {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toStringSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if v == nil {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBytesSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24))
+ return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56))
+ return b
+}
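+
+// Both fixed-width appenders emit little-endian bytes; for example,
+// appendFixed64(nil, 1) yields 01 00 00 00 00 00 00 00.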
+
+// appendVarint appends an encoded varint to b.
+func appendVarint(b []byte, v uint64) []byte {
+ // TODO: make 1-byte (maybe 2-byte) case inline-able, once we
+ // have non-leaf inliner.
+ switch {
+ case v < 1<<7:
+ b = append(b, byte(v))
+ case v < 1<<14:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte(v>>7))
+ case v < 1<<21:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte(v>>14))
+ case v < 1<<28:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte(v>>21))
+ case v < 1<<35:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte(v>>28))
+ case v < 1<<42:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte(v>>35))
+ case v < 1<<49:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte(v>>42))
+ case v < 1<<56:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte(v>>49))
+ case v < 1<<63:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte(v>>56))
+ default:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte((v>>56)&0x7f|0x80),
+ 1)
+ }
+ return b
+}
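+
+// For example, appendVarint(nil, 300) emits 0xAC 0x02: the low seven bits
+// 0101100 with the continuation bit set, then the remaining bits 10.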
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, *p)
+ return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
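+
+// The packed appenders rewrite the key in place: wiretag&^7 clears the low
+// three wire-type bits and |WireBytes marks the field as length-delimited.
+// For a hypothetical fixed32 field with tag 4, the per-element key
+// (4<<3)|WireFixed32 = 37 becomes (4<<3)|WireBytes = 34.
+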
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(*p))
+ return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(*p))
+ return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, *p)
+ return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(*p))
+ return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(*p))
+ return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, *p)
+ return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
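+// The expression uint64(v<<1)^uint64(v>>63), repeated in the helpers
+// above, is the zigzag transform: it maps small magnitudes to short
+// varints (0->0, -1->1, 1->2, -2->3, ...). An illustrative inverse,
+// using a hypothetical helper name, would be
+//
+//	func unzigzag64(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }
+//
+// so unzigzag64(3) == -2, round-tripping the encoding.
+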
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ if !v {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = append(b, 1)
+ return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ if *p {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(len(s)))
+ for _, v := range s {
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
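+// Note the packed length above is simply len(s): every bool encodes to
+// exactly one byte (0 or 1), so no per-element size pass is needed.
+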
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ v := *ptr.toString()
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if v == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBytesSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ return u.size(p) + 2*tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ var err error
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, p, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ return b, err
+ }
+}
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ n += u.size(v) + 2*tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err error
+ var nerr nonFatal
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, v, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ if !nerr.Merge(err) {
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.size(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(p)
+ b = appendVarint(b, uint64(siz))
+ return u.marshal(b, p, deterministic)
+ }
+}
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ siz := u.size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err error
+ var nerr nonFatal
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(v)
+ b = appendVarint(b, uint64(siz))
+ b, err = u.marshal(b, v, deterministic)
+
+ if !nerr.Merge(err) {
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+ // figure out key and value type
+ t := f.Type
+ keyType := t.Key()
+ valType := t.Elem()
+ keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+ valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+ keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+ valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+ keyWireTag := 1<<3 | wiretype(keyTags[0])
+ valWireTag := 2<<3 | wiretype(valTags[0])
+
+ // We create an interface to get the addresses of the map key and value.
+ // If value is pointer-typed, the interface is a direct interface, the
+ // idata itself is the value. Otherwise, the idata is the pointer to the
+ // value.
+ // Key cannot be pointer-typed.
+ valIsPtr := valType.Kind() == reflect.Ptr
+
+ // If value is a message with nested maps, calling
+ // valSizer in marshal may be quadratic. We should use
+ // cached version in marshal (but not in size).
+ // If value is not message type, we don't have size cache,
+ // but it cannot be nested either. Just use valSizer.
+ valCachedSizer := valSizer
+ if valIsPtr && valType.Elem().Kind() == reflect.Struct {
+ u := getMarshalInfo(valType.Elem())
+ valCachedSizer = func(ptr pointer, tagsize int) int {
+ // Same as message sizer, but use cache.
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.cachedsize(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ }
+ return func(ptr pointer, tagsize int) int {
+ m := ptr.asPointerTo(t).Elem() // the map
+ n := 0
+ for _, k := range m.MapKeys() {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+ m := ptr.asPointerTo(t).Elem() // the map
+ var err error
+ keys := m.MapKeys()
+ if len(keys) > 1 && deterministic {
+ sort.Sort(mapKeys(keys))
+ }
+
+ var nerr nonFatal
+ for _, k := range keys {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ b = appendVarint(b, tag)
+ siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ b = appendVarint(b, uint64(siz))
+ b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+ if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
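+// Illustrative wire layout produced by the marshaler above (comment-only
+// sketch): a map field with tag 4 containing {"k": 7} is written as one
+// nested entry message per key, the key as field 1 and the value as field 2:
+//
+//	0x22, 0x05,      // 4<<3|WireBytes, 5 payload bytes
+//	0x0a, 0x01, 'k', // key: 1<<3|WireBytes, len 1, "k"
+//	0x10, 0x07,      // value: 2<<3|WireVarint, 7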
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+ // Oneof field is an interface. We need to get the actual data type on the fly.
+ t := f.Type
+ return func(ptr pointer, _ int) int {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return 0
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ e := fi.oneofElems[telem]
+ return e.sizer(p, e.tagsize)
+ },
+ func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return b, nil
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+ return b, errOneofHasNil
+ }
+ e := fi.oneofElems[telem]
+ return e.marshaler(b, p, e.wiretag, deterministic)
+ }
+}
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+ var nerr nonFatal
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ // Not sure this is required, but the old code does it.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// message set format is:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
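+//
+// For example (illustrative bytes, assuming type_id 12345 and a two-byte
+// message payload 0x08 0x01), one Item is encoded as:
+//
+//	0x0b,                   // 1<<3|WireStartGroup
+//	0x10, 0xb9, 0x60,       // 2<<3|WireVarint, varint(12345)
+//	0x1a, 0x02, 0x08, 0x01, // 3<<3|WireBytes, len 2, payload
+//	0x0c,                   // 1<<3|WireEndGroup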
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for id, e := range m {
+ n += 2 // start group, end group. tag = 1 (size=1)
+ n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ siz := len(msgWithLen)
+ n += siz + 1 // message, tag = 3 (size=1)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+ var nerr nonFatal
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for id, e := range m {
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ b = append(b, 1<<3|WireEndGroup)
+ }
+ return b, nerr.E
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, id := range keys {
+ e := m[int32(id)]
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ b = append(b, 1<<3|WireEndGroup)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+ if m == nil {
+ return 0
+ }
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ var err error
+ var nerr nonFatal
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+ XXX_Size() int
+ XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+ if m, ok := pb.(newMarshaler); ok {
+ return m.XXX_Size()
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, _ := m.Marshal()
+ return len(b)
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return 0
+ }
+ var info InternalMessageInfo
+ return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ b := make([]byte, 0, siz)
+ return m.XXX_Marshal(b, false)
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ return m.Marshal()
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return nil, ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ b := make([]byte, 0, siz)
+ return info.Marshal(b, pb, false)
+}
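+// A typical call site looks like the following sketch, where pb.Example
+// stands in for any generated message type (hypothetical, for
+// illustration only):
+//
+//	msg := &pb.Example{Name: proto.String("hello")}
+//	data, err := proto.Marshal(msg)
+//	if err != nil {
+//		// handle encoding error, e.g. a required field that is unset
+//	}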
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+ var err error
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+ return err
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, err := m.Marshal()
+ p.buf = append(p.buf, b...)
+ return err
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+ return err
+}
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+func (p *Buffer) grow(n int) {
+ need := len(p.buf) + n
+ if need <= cap(p.buf) {
+ return
+ }
+ newCap := len(p.buf) * 2
+ if newCap < need {
+ newCap = need
+ }
+ p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
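+// Worked numbers for the policy above (illustrative): with len(buf)=8 and
+// cap(buf)=8, grow(4) needs 12 and doubles to 16; grow(100) needs 108,
+// which exceeds the doubled 16, so exactly 108 is allocated.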
diff --git a/agent/vendor/github.com/golang/protobuf/proto/table_merge.go b/agent/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 00000000000..5525def6a5d
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+ mi := atomicLoadMergeInfo(&a.merge)
+ if mi == nil {
+ mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+ atomicStoreMergeInfo(&a.merge, mi)
+ }
+ mi.merge(toPointer(&dst), toPointer(&src))
+}
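+// Most callers reach this through the package-level helper; a hedged
+// sketch with pb.Example standing in for a generated type:
+//
+//	dst := &pb.Example{}
+//	src := &pb.Example{Name: proto.String("x")}
+//	proto.Merge(dst, src) // scalars set in src overwrite dst; repeated
+//	                      // fields and unknown bytes are appended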
+
+type mergeInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []mergeFieldInfo
+ unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+
+ // isPointer reports whether the value in the field is a pointer.
+ // This is true for the following situations:
+ // * Pointer to struct
+ // * Pointer to basic type (proto2 only)
+ // * Slice (first value in slice header is a pointer)
+ // * String (first value in string header is a pointer)
+ isPointer bool
+
+ // basicWidth reports the width of the field assuming that it is directly
+ // embedded in the struct (as is the case for basic types in proto3).
+ // The possible values are:
+ // 0: invalid
+ // 1: bool
+ // 4: int32, uint32, float32
+ // 8: int64, uint64, float64
+ basicWidth int
+
+ // Where dst and src are pointers to the types being merged.
+ merge func(dst, src pointer)
+}
+
+var (
+ mergeInfoMap = map[reflect.Type]*mergeInfo{}
+ mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+ mergeInfoLock.Lock()
+ defer mergeInfoLock.Unlock()
+ mi := mergeInfoMap[t]
+ if mi == nil {
+ mi = &mergeInfo{typ: t}
+ mergeInfoMap[t] = mi
+ }
+ return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+ if dst.isNil() {
+ panic("proto: nil destination")
+ }
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&mi.initialized) == 0 {
+ mi.computeMergeInfo()
+ }
+
+ for _, fi := range mi.fields {
+ sfp := src.offset(fi.field)
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+ continue
+ }
+ if fi.basicWidth > 0 {
+ switch {
+ case fi.basicWidth == 1 && !*sfp.toBool():
+ continue
+ case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+ continue
+ case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+ continue
+ }
+ }
+ }
+
+ dfp := dst.offset(fi.field)
+ fi.merge(dfp, sfp)
+ }
+
+ // TODO: Make this faster?
+ out := dst.asPointerTo(mi.typ).Elem()
+ in := src.asPointerTo(mi.typ).Elem()
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ if mi.unrecognized.IsValid() {
+ if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+ *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+ }
+ }
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+ mi.lock.Lock()
+ defer mi.lock.Unlock()
+ if mi.initialized != 0 {
+ return
+ }
+ t := mi.typ
+ n := t.NumField()
+
+ props := GetProperties(t)
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ mfi := mergeFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ switch tf.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.String:
+ // As a special case, we assume slices and strings are pointers
+ // since we know that the first field in the SliceHeader or
+ // StringHeader is a data pointer.
+ mfi.isPointer = true
+ case reflect.Bool:
+ mfi.basicWidth = 1
+ case reflect.Int32, reflect.Uint32, reflect.Float32:
+ mfi.basicWidth = 4
+ case reflect.Int64, reflect.Uint64, reflect.Float64:
+ mfi.basicWidth = 8
+ }
+ }
+
+ // Unwrap tf to get at its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + tf.Name())
+ }
+
+ switch tf.Kind() {
+ case reflect.Int32:
+ switch {
+ case isSlice: // E.g., []int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+ /*
+ sfsp := src.toInt32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ */
+ sfs := src.getInt32Slice()
+ if sfs != nil {
+ dfs := dst.getInt32Slice()
+ dfs = append(dfs, sfs...)
+ if dfs == nil {
+ dfs = []int32{}
+ }
+ dst.setInt32Slice(dfs)
+ }
+ }
+ case isPointer: // E.g., *int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+ /*
+ sfpp := src.toInt32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt32Ptr()
+ if *dfpp == nil {
+ *dfpp = Int32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ */
+ sfp := src.getInt32Ptr()
+ if sfp != nil {
+ dfp := dst.getInt32Ptr()
+ if dfp == nil {
+ dst.setInt32Ptr(*sfp)
+ } else {
+ *dfp = *sfp
+ }
+ }
+ }
+ default: // E.g., int32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt32(); v != 0 {
+ *dst.toInt32() = v
+ }
+ }
+ }
+ case reflect.Int64:
+ switch {
+ case isSlice: // E.g., []int64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toInt64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *int64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toInt64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt64Ptr()
+ if *dfpp == nil {
+ *dfpp = Int64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., int64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt64(); v != 0 {
+ *dst.toInt64() = v
+ }
+ }
+ }
+ case reflect.Uint32:
+ switch {
+ case isSlice: // E.g., []uint32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint32Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint32(); v != 0 {
+ *dst.toUint32() = v
+ }
+ }
+ }
+ case reflect.Uint64:
+ switch {
+ case isSlice: // E.g., []uint64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint64Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint64(); v != 0 {
+ *dst.toUint64() = v
+ }
+ }
+ }
+ case reflect.Float32:
+ switch {
+ case isSlice: // E.g., []float32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat32Ptr()
+ if *dfpp == nil {
+ *dfpp = Float32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat32(); v != 0 {
+ *dst.toFloat32() = v
+ }
+ }
+ }
+ case reflect.Float64:
+ switch {
+ case isSlice: // E.g., []float64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat64Ptr()
+ if *dfpp == nil {
+ *dfpp = Float64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat64(); v != 0 {
+ *dst.toFloat64() = v
+ }
+ }
+ }
+ case reflect.Bool:
+ switch {
+ case isSlice: // E.g., []bool
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toBoolSlice()
+ if *sfsp != nil {
+ dfsp := dst.toBoolSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []bool{}
+ }
+ }
+ }
+ case isPointer: // E.g., *bool
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toBoolPtr()
+ if *sfpp != nil {
+ dfpp := dst.toBoolPtr()
+ if *dfpp == nil {
+ *dfpp = Bool(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., bool
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toBool(); v {
+ *dst.toBool() = v
+ }
+ }
+ }
+ case reflect.String:
+ switch {
+ case isSlice: // E.g., []string
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toStringSlice()
+ if *sfsp != nil {
+ dfsp := dst.toStringSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []string{}
+ }
+ }
+ }
+ case isPointer: // E.g., *string
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toStringPtr()
+ if *sfpp != nil {
+ dfpp := dst.toStringPtr()
+ if *dfpp == nil {
+ *dfpp = String(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., string
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toString(); v != "" {
+ *dst.toString() = v
+ }
+ }
+ }
+ case reflect.Slice:
+ isProto3 := props.Prop[i].proto3
+ switch {
+ case isPointer:
+ panic("bad pointer in byte slice case in " + tf.Name())
+ case tf.Elem().Kind() != reflect.Uint8:
+ panic("bad element kind in byte slice case in " + tf.Name())
+ case isSlice: // E.g., [][]byte
+ mfi.merge = func(dst, src pointer) {
+ sbsp := src.toBytesSlice()
+ if *sbsp != nil {
+ dbsp := dst.toBytesSlice()
+ for _, sb := range *sbsp {
+ if sb == nil {
+ *dbsp = append(*dbsp, nil)
+ } else {
+ *dbsp = append(*dbsp, append([]byte{}, sb...))
+ }
+ }
+ if *dbsp == nil {
+ *dbsp = [][]byte{}
+ }
+ }
+ }
+ default: // E.g., []byte
+ mfi.merge = func(dst, src pointer) {
+ sbp := src.toBytes()
+ if *sbp != nil {
+ dbp := dst.toBytes()
+ if !isProto3 || len(*sbp) > 0 {
+ *dbp = append([]byte{}, *sbp...)
+ }
+ }
+ }
+ }
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("message field %s without pointer", tf))
+ case isSlice: // E.g., []*pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sps := src.getPointerSlice()
+ if sps != nil {
+ dps := dst.getPointerSlice()
+ for _, sp := range sps {
+ var dp pointer
+ if !sp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ mi.merge(dp, sp)
+ }
+ dps = append(dps, dp)
+ }
+ if dps == nil {
+ dps = []pointer{}
+ }
+ dst.setPointerSlice(dps)
+ }
+ }
+ default: // E.g., *pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ dp := dst.getPointer()
+ if dp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ dst.setPointer(dp)
+ }
+ mi.merge(dp, sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in map case in " + tf.Name())
+ default: // E.g., map[K]V
+ mfi.merge = func(dst, src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ dm := dst.asPointerTo(tf).Elem()
+ if dm.IsNil() {
+ dm.Set(reflect.MakeMap(tf))
+ }
+
+ switch tf.Elem().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(Clone(val.Interface().(Message)))
+ dm.SetMapIndex(key, val)
+ }
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ dm.SetMapIndex(key, val)
+ }
+ default: // Basic type (e.g., string)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ dm.SetMapIndex(key, val)
+ }
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in interface case in " + tf.Name())
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ mfi.merge = func(dst, src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ du := dst.asPointerTo(tf).Elem()
+ typ := su.Elem().Type()
+ if du.IsNil() || du.Elem().Type() != typ {
+ du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+ }
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ dv := du.Elem().Elem().Field(0)
+ if dv.Kind() == reflect.Ptr && dv.IsNil() {
+ dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ Merge(dv.Interface().(Message), sv.Interface().(Message))
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+ default: // Basic type (e.g., string)
+ dv.Set(sv)
+ }
+ }
+ }
+ }
+ default:
+ panic(fmt.Sprintf("merger not found for type:%s", tf))
+ }
+ mi.fields = append(mi.fields, mfi)
+ }
+
+ mi.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ mi.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&mi.initialized, 1)
+}
diff --git a/agent/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/agent/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
new file mode 100644
index 00000000000..ebf1caa56a2
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2051 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+ // Load the unmarshal information for this message type.
+ // The atomic load ensures memory consistency.
+ u := atomicLoadUnmarshalInfo(&a.unmarshal)
+ if u == nil {
+ // Slow path: find unmarshal info for msg, update a with it.
+ u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+ atomicStoreUnmarshalInfo(&a.unmarshal, u)
+ }
+ // Then do the unmarshaling.
+ err := u.unmarshal(toPointer(&msg), b)
+ return err
+}
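+// Generated code routes proto.Unmarshal here; a hedged usage sketch with
+// pb.Example standing in for a generated type:
+//
+//	msg := &pb.Example{}
+//	if err := proto.Unmarshal(data, msg); err != nil {
+//		// handle parse error
+//	}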
+
+type unmarshalInfo struct {
+ typ reflect.Type // type of the protobuf struct
+
+ // 0 = only typ field is initialized
+ // 1 = completely initialized
+ initialized int32
+ lock sync.Mutex // prevents double initialization
+ dense []unmarshalFieldInfo // fields indexed by tag #
+ sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
+ reqFields []string // names of required fields
+ reqMask uint64 // 1<<len(reqFields)-1 when all required fields are set
+ unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+ extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+ oldExtensions field // offset of old-form extensions field (of type map[int32]Extension)
+ extensionRanges []ExtensionRange // if non-nil, the extension ranges in this message
+ isMessageSet bool // if true, implies extensionRanges is non-nil
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unconsumed bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+// unmarshalFieldInfo contains the information for unmarshaling a field.
+type unmarshalFieldInfo struct {
+ // location of the field in the proto message structure.
+ field field
+
+ // function to unmarshal the data for the field.
+ unmarshal unmarshaler
+
+ // if a required field, contains a single set bit at this field's index
+ // in the required field list.
+ reqMask uint64
+
+ name string // name of the field, for error reporting
+}
+
+var (
+ unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{}
+ unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+ // It would be correct to return a new unmarshalInfo
+ // unconditionally. We would end up allocating one
+ // per occurrence of that type as a message or submessage.
+ // We use a cache here just to reduce memory usage.
+ unmarshalInfoLock.Lock()
+ defer unmarshalInfoLock.Unlock()
+ u := unmarshalInfoMap[t]
+ if u == nil {
+ u = &unmarshalInfo{typ: t}
+ // Note: we just set the type here. The rest of the fields
+ // will be initialized on first use.
+ unmarshalInfoMap[t] = u
+ }
+ return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeUnmarshalInfo()
+ }
+ if u.isMessageSet {
+ return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+ }
+ var reqMask uint64 // bitmask of required fields we've seen
+ var errLater error
+ for len(b) > 0 {
+ // Read tag and wire type.
+ // Special case 1 and 2 byte varints.
+ var x uint64
+ if b[0] < 128 {
+ x = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ }
+ tag := x >> 3
+ wire := int(x) & 7
+
+ // Dispatch on the tag to one of the unmarshal* functions below.
+ var f unmarshalFieldInfo
+ if tag < uint64(len(u.dense)) {
+ f = u.dense[tag]
+ } else {
+ f = u.sparse[tag]
+ }
+ if fn := f.unmarshal; fn != nil {
+ var err error
+ b, err = fn(b, m.offset(f.field), wire)
+ if err == nil {
+ reqMask |= f.reqMask
+ continue
+ }
+ if r, ok := err.(*RequiredNotSetError); ok {
+ // Remember this error, but keep parsing. We need to produce
+ // a full parse even if a required field is missing.
+ if errLater == nil {
+ errLater = r
+ }
+ reqMask |= f.reqMask
+ continue
+ }
+ if err != errInternalBadWireType {
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
+ return err
+ }
+ // Fragments with bad wire type are treated as unknown fields.
+ }
+
+ // Unknown tag.
+ if !u.unrecognized.IsValid() {
+ // Don't keep unrecognized data; just skip it.
+ var err error
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ // Keep unrecognized data around.
+ // maybe in extensions, maybe in the unrecognized field.
+ z := m.offset(u.unrecognized).toBytes()
+ var emap map[int32]Extension
+ var e Extension
+ for _, r := range u.extensionRanges {
+ if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+ if u.extensions.IsValid() {
+ mp := m.offset(u.extensions).toExtensions()
+ emap = mp.extensionsWrite()
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ if u.oldExtensions.IsValid() {
+ p := m.offset(u.oldExtensions).toOldExtensions()
+ emap = *p
+ if emap == nil {
+ emap = map[int32]Extension{}
+ *p = emap
+ }
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ panic("no extensions field available")
+ }
+ }
+
+ // Use wire type to skip data.
+ var err error
+ b0 := b
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ *z = encodeVarint(*z, tag<<3|uint64(wire))
+ *z = append(*z, b0[:len(b0)-len(b)]...)
+
+ if emap != nil {
+ emap[int32(tag)] = e
+ }
+ }
+ if reqMask != u.reqMask && errLater == nil {
+ // A required field of this message is missing.
+ for _, n := range u.reqFields {
+ if reqMask&1 == 0 {
+ errLater = &RequiredNotSetError{n}
+ }
+ reqMask >>= 1
+ }
+ }
+ return errLater
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+ u.lock.Lock()
+ defer u.lock.Unlock()
+ if u.initialized != 0 {
+ return
+ }
+ t := u.typ
+ n := t.NumField()
+
+ // Set up the "not found" value for the unrecognized byte buffer.
+ // This is the default for proto3.
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.oldExtensions = invalidField
+
+ // List of the generated type and offset for each oneof field.
+ type oneofField struct {
+ ityp reflect.Type // interface type of oneof field
+ field field // offset in containing message
+ }
+ var oneofFields []oneofField
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if f.Name == "XXX_unrecognized" {
+ // The byte slice used to hold unrecognized input is special.
+ if f.Type != reflect.TypeOf(([]byte)(nil)) {
+ panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+ }
+ u.unrecognized = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_InternalExtensions" {
+ // Ditto here.
+ if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+ panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+ }
+ u.extensions = toField(&f)
+ if f.Tag.Get("protobuf_messageset") == "1" {
+ u.isMessageSet = true
+ }
+ continue
+ }
+ if f.Name == "XXX_extensions" {
+ // An older form of the extensions field.
+ if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
+ panic("bad type for XXX_extensions field: " + f.Type.Name())
+ }
+ u.oldExtensions = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+ continue
+ }
+
+ oneof := f.Tag.Get("protobuf_oneof")
+ if oneof != "" {
+ oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+ // The rest of oneof processing happens below.
+ continue
+ }
+
+ tags := f.Tag.Get("protobuf")
+ tagArray := strings.Split(tags, ",")
+ if len(tagArray) < 2 {
+ panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+ }
+ tag, err := strconv.Atoi(tagArray[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tagArray[1])
+ }
+
+ name := ""
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ }
+
+ // Extract unmarshaling function from the field (its type and tags).
+ unmarshal := fieldUnmarshaler(&f)
+
+ // Required field?
+ var reqMask uint64
+ if tagArray[2] == "req" {
+ bit := len(u.reqFields)
+ u.reqFields = append(u.reqFields, name)
+ reqMask = uint64(1) << uint(bit)
+ // TODO: if we have more than 64 required fields, we end up
+ // not verifying that all required fields are present.
+ // Fix this, perhaps using a count of required fields?
+ }
+
+ // Store the info in the correct slot in the message.
+ u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+ }
+
+ // Find any types associated with oneof fields.
+ // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
+ if fn.IsValid() {
+ res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
+ for i := res.Len() - 1; i >= 0; i-- {
+ v := res.Index(i) // interface{}
+ tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
+ typ := tptr.Elem() // Msg_X
+
+ f := typ.Field(0) // oneof implementers have one field
+ baseUnmarshal := fieldUnmarshaler(&f)
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ fieldNum, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tags[1])
+ }
+ var name string
+ for _, tag := range tags {
+ if strings.HasPrefix(tag, "name=") {
+ name = strings.TrimPrefix(tag, "name=")
+ break
+ }
+ }
+
+ // Find the oneof field that this struct implements.
+ // Might take O(n^2) to process all of the oneofs, but who cares.
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(fieldNum, of.field, unmarshal, 0, name)
+ }
+ }
+ }
+ }
+
+ // Get extension ranges, if any.
+ fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ if fn.IsValid() {
+ if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+ panic("a message with extensions, but no extensions field in " + t.Name())
+ }
+ u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+ }
+
+ // Explicitly disallow tag 0. This will ensure we flag an error
+ // when decoding a buffer of all zeros. Without this code, we
+ // would decode and skip an all-zero buffer of even length.
+ // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+ u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+ return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+ }, 0, "")
+
+ // Set mask for required field check.
+ u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+ i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+ n := u.typ.NumField()
+ if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+ for len(u.dense) <= tag {
+ u.dense = append(u.dense, unmarshalFieldInfo{})
+ }
+ u.dense[tag] = i
+ return
+ }
+ if u.sparse == nil {
+ u.sparse = map[uint64]unmarshalFieldInfo{}
+ }
+ u.sparse[uint64(tag)] = i
+}
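+// Dispatch-side illustration (comment only): tags below 16, or below
+// twice the field count, land in u.dense for O(1) lookup (e.g. tag 3 of a
+// ten-field message reads u.dense[3]); a large tag such as 5000 falls
+// back to the u.sparse map.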
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+ if f.Type.Kind() == reflect.Map {
+ return makeUnmarshalMap(f)
+ }
+ return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+ tagArray := strings.Split(tags, ",")
+ encoding := tagArray[0]
+ name := "unknown"
+ proto3 := false
+ validateUTF8 := true
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ if tag == "proto3" {
+ proto3 = true
+ }
+ }
+ validateUTF8 = validateUTF8 && proto3
+
+ // Figure out packaging (pointer, slice, or both)
+ slice := false
+ pointer := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ // We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + t.Name())
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return unmarshalBoolPtr
+ }
+ if slice {
+ return unmarshalBoolSlice
+ }
+ return unmarshalBoolValue
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixedS32Ptr
+ }
+ if slice {
+ return unmarshalFixedS32Slice
+ }
+ return unmarshalFixedS32Value
+ case "varint":
+ // this could be int32 or enum
+ if pointer {
+ return unmarshalInt32Ptr
+ }
+ if slice {
+ return unmarshalInt32Slice
+ }
+ return unmarshalInt32Value
+ case "zigzag32":
+ if pointer {
+ return unmarshalSint32Ptr
+ }
+ if slice {
+ return unmarshalSint32Slice
+ }
+ return unmarshalSint32Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixedS64Ptr
+ }
+ if slice {
+ return unmarshalFixedS64Slice
+ }
+ return unmarshalFixedS64Value
+ case "varint":
+ if pointer {
+ return unmarshalInt64Ptr
+ }
+ if slice {
+ return unmarshalInt64Slice
+ }
+ return unmarshalInt64Value
+ case "zigzag64":
+ if pointer {
+ return unmarshalSint64Ptr
+ }
+ if slice {
+ return unmarshalSint64Slice
+ }
+ return unmarshalSint64Value
+ }
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixed32Ptr
+ }
+ if slice {
+ return unmarshalFixed32Slice
+ }
+ return unmarshalFixed32Value
+ case "varint":
+ if pointer {
+ return unmarshalUint32Ptr
+ }
+ if slice {
+ return unmarshalUint32Slice
+ }
+ return unmarshalUint32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixed64Ptr
+ }
+ if slice {
+ return unmarshalFixed64Slice
+ }
+ return unmarshalFixed64Value
+ case "varint":
+ if pointer {
+ return unmarshalUint64Ptr
+ }
+ if slice {
+ return unmarshalUint64Slice
+ }
+ return unmarshalUint64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return unmarshalFloat32Ptr
+ }
+ if slice {
+ return unmarshalFloat32Slice
+ }
+ return unmarshalFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return unmarshalFloat64Ptr
+ }
+ if slice {
+ return unmarshalFloat64Slice
+ }
+ return unmarshalFloat64Value
+ case reflect.Map:
+ panic("map type in typeUnmarshaler in " + t.Name())
+ case reflect.Slice:
+ if pointer {
+ panic("bad pointer in slice case in " + t.Name())
+ }
+ if slice {
+ return unmarshalBytesSlice
+ }
+ return unmarshalBytesValue
+ case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return unmarshalUTF8StringPtr
+ }
+ if slice {
+ return unmarshalUTF8StringSlice
+ }
+ return unmarshalUTF8StringValue
+ }
+ if pointer {
+ return unmarshalStringPtr
+ }
+ if slice {
+ return unmarshalStringSlice
+ }
+ return unmarshalStringValue
+ case reflect.Struct:
+ // message or group field
+ if !pointer {
+ panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
+ }
+ switch encoding {
+ case "bytes":
+ if slice {
+ return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+ case "group":
+ if slice {
+ return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+ }
+ }
+ panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
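+// unmarshalInt64Slice accepts both encodings of a repeated int64: the packed
+// form (WireBytes: one length-delimited run of varints) and the unpacked form
+// (WireVarint: one element per tag), as protobuf parsers are required to.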
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
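+ // Zigzag decode: 0,1,2,3,... maps to 0,-1,1,-2,... (x>>1, negated when the low bit of x is set).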
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64() = v
+ return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32() = v
+ return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
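+ // Assemble the 8 little-endian bytes into a uint64.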
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ *f.toInt32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.setInt32Ptr(v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ // Note: any length varint is allowed, even though any sane
+ // encoder will use one byte.
+ // See https://github.com/golang/protobuf/issues/76
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // TODO: check if x>1? Tests seem to indicate no.
+ v := x != 0
+ *f.toBool() = v
+ return b[n:], nil
+}
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ *f.toBoolPtr() = &v
+ return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ b = b[n:]
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64() = v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32() = v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
+ return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
+ return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
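+// Note: the UTF8 variants below still store the decoded string even when it
+// is not valid UTF-8; errInvalidUTF8 is returned together with the remaining
+// buffer so callers can treat the problem as non-fatal.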
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // The use of append here is a trick which avoids the zeroing
+ // that would be required if we used a make/copy pair.
+ // We append to emptyBuf instead of nil because we want
+ // a non-nil result even when the length is 0.
+ v := append(emptyBuf[:], b[:x]...)
+ *f.toBytes() = v
+ return b[x:], nil
+}
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := append(emptyBuf[:], b[:x]...)
+ s := f.toBytesSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // First read the message field to see if something is there.
+ // The semantics of multiple submessages are weird. Instead of
+ // the last one winning (as it is for all other fields), multiple
+ // submessages are merged.
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[y:], err
+ }
+}
+
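+// makeUnmarshalMap returns an unmarshaler for a map field. On the wire each
+// map entry is a submessage whose key is field 1 and whose value is field 2.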
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+ t := f.Type
+ kt := t.Key()
+ vt := t.Elem()
+ unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+ unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // The map entry is a submessage. Figure out how big it is.
+ if w != WireBytes {
+ return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ r := b[x:] // unused data to return
+ b = b[:x] // data for map entry
+
+ // Note: we could use #keys * #values ~= 200 functions
+ // to do map decoding without reflection. Probably not worth it.
+ // Maps will be somewhat slow. Oh well.
+
+ // Read key and value from data.
+ var nerr nonFatal
+ k := reflect.New(kt)
+ v := reflect.New(vt)
+ for len(b) > 0 {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ wire := int(x) & 7
+ b = b[n:]
+
+ var err error
+ switch x >> 3 {
+ case 1:
+ b, err = unmarshalKey(b, valToPointer(k), wire)
+ case 2:
+ b, err = unmarshalVal(b, valToPointer(v), wire)
+ default:
+ err = errInternalBadWireType // skip unknown tag
+ }
+
+ if nerr.Merge(err) {
+ continue
+ }
+ if err != errInternalBadWireType {
+ return nil, err
+ }
+
+ // Skip past unknown fields.
+ b, err = skipField(b, wire)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Get map, allocate if needed.
+ m := f.asPointerTo(t).Elem() // an addressable map[K]T
+ if m.IsNil() {
+ m.Set(reflect.MakeMap(t))
+ }
+
+ // Insert into map.
+ m.SetMapIndex(k.Elem(), v.Elem())
+
+ return r, nerr.E
+ }
+}
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// for:
+// message Msg {
+//   oneof F {
+//     int64 X = 1;
+//     double Y = 2;
+//   }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+ sf := typ.Field(0)
+ field0 := toField(&sf)
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // Allocate holder for value.
+ v := reflect.New(typ)
+
+ // Unmarshal data into holder.
+ // We unmarshal into the first field of the holder object.
+ var err error
+ var nerr nonFatal
+ b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+ if !nerr.Merge(err) {
+ return nil, err
+ }
+
+ // Write pointer to holder into target field.
+ f.asPointerTo(ityp).Elem().Set(v)
+
+ return b, nerr.E
+ }
+}
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+ switch wire {
+ case WireVarint:
+ _, k := decodeVarint(b)
+ if k == 0 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[k:]
+ case WireFixed32:
+ if len(b) < 4 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[4:]
+ case WireFixed64:
+ if len(b) < 8 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[8:]
+ case WireBytes:
+ m, k := decodeVarint(b)
+ if k == 0 || uint64(len(b)-k) < m {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[uint64(k)+m:]
+ case WireStartGroup:
+ _, i := findEndGroup(b)
+ if i == -1 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[i:]
+ default:
+ return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+ }
+ return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
+func findEndGroup(b []byte) (int, int) {
+ depth := 1
+ i := 0
+ for {
+ x, n := decodeVarint(b[i:])
+ if n == 0 {
+ return -1, -1
+ }
+ j := i
+ i += n
+ switch x & 7 {
+ case WireVarint:
+ _, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ case WireFixed32:
+ if len(b)-4 < i {
+ return -1, -1
+ }
+ i += 4
+ case WireFixed64:
+ if len(b)-8 < i {
+ return -1, -1
+ }
+ i += 8
+ case WireBytes:
+ m, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ if uint64(len(b)-i) < m {
+ return -1, -1
+ }
+ i += int(m)
+ case WireStartGroup:
+ depth++
+ case WireEndGroup:
+ depth--
+ if depth == 0 {
+ return j, i
+ }
+ default:
+ return -1, -1
+ }
+ }
+}
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
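+// For example, encodeVarint(nil, 300) yields [0xAC, 0x02]: seven bits per
+// byte, least-significant group first, high bit set on all but the last byte.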
+func encodeVarint(b []byte, x uint64) []byte {
+ for x >= 1<<7 {
+ b = append(b, byte(x&0x7f|0x80))
+ x >>= 7
+ }
+ return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
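+// The loop is unrolled by hand: each step consumes one byte of the at most
+// ten bytes a 64-bit varint can occupy.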
+func decodeVarint(b []byte) (uint64, int) {
+ var x, y uint64
+ if len(b) <= 0 {
+ goto bad
+ }
+ x = uint64(b[0])
+ if x < 0x80 {
+ return x, 1
+ }
+ x -= 0x80
+
+ if len(b) <= 1 {
+ goto bad
+ }
+ y = uint64(b[1])
+ x += y << 7
+ if y < 0x80 {
+ return x, 2
+ }
+ x -= 0x80 << 7
+
+ if len(b) <= 2 {
+ goto bad
+ }
+ y = uint64(b[2])
+ x += y << 14
+ if y < 0x80 {
+ return x, 3
+ }
+ x -= 0x80 << 14
+
+ if len(b) <= 3 {
+ goto bad
+ }
+ y = uint64(b[3])
+ x += y << 21
+ if y < 0x80 {
+ return x, 4
+ }
+ x -= 0x80 << 21
+
+ if len(b) <= 4 {
+ goto bad
+ }
+ y = uint64(b[4])
+ x += y << 28
+ if y < 0x80 {
+ return x, 5
+ }
+ x -= 0x80 << 28
+
+ if len(b) <= 5 {
+ goto bad
+ }
+ y = uint64(b[5])
+ x += y << 35
+ if y < 0x80 {
+ return x, 6
+ }
+ x -= 0x80 << 35
+
+ if len(b) <= 6 {
+ goto bad
+ }
+ y = uint64(b[6])
+ x += y << 42
+ if y < 0x80 {
+ return x, 7
+ }
+ x -= 0x80 << 42
+
+ if len(b) <= 7 {
+ goto bad
+ }
+ y = uint64(b[7])
+ x += y << 49
+ if y < 0x80 {
+ return x, 8
+ }
+ x -= 0x80 << 49
+
+ if len(b) <= 8 {
+ goto bad
+ }
+ y = uint64(b[8])
+ x += y << 56
+ if y < 0x80 {
+ return x, 9
+ }
+ x -= 0x80 << 56
+
+ if len(b) <= 9 {
+ goto bad
+ }
+ y = uint64(b[9])
+ x += y << 63
+ if y < 2 {
+ return x, 10
+ }
+
+bad:
+ return 0, 0
+}
diff --git a/agent/vendor/github.com/golang/protobuf/proto/text.go b/agent/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 00000000000..1aaee725b45
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,843 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Print("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+func requiresQuotes(u string) bool {
+ // When the type URL contains any character outside [0-9A-Za-z._/], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if name == "XXX_NoUnkeyedLiteral" {
+ continue
+ }
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.MapValProp); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if _, err := extendable(pv.Interface()); err == nil {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if v.CanAddr() {
+ // Calling v.Interface on a struct causes the reflect package to
+ // copy the entire struct. This is racy with the new Marshaler
+ // since we atomically update the XXX_sizecache.
+ //
+ // Thus, we retrieve a pointer to the struct if possible to avoid
+ // a race since v.Interface on the pointer doesn't copy the struct.
+ //
+ // If v is not addressable, then we are not worried about a race
+ // since it implies that the binary Marshaler cannot possibly be
+ // mutating this value.
+ v = v.Addr()
+ }
+ if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
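+// For example, a bell character (0x07) is written as \007, not Go's \a.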
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
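+// writeUnknownStruct renders raw unknown-field bytes as tag:value lines,
+// reporting any bytes it cannot decode as /* ... */ comments.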
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep, _ := extendable(pv.Interface())
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m, mu := ep.extensionsRead()
+ if m == nil {
+ return nil
+ }
+ mu.Lock()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ mu.Unlock()
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte(""))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
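+// Typical callers use the package-level helpers below; for example,
+// s := MarshalTextString(msg) produces a one-off text dump of msg
+// (msg being any generated Message value, shown here for illustration).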
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/agent/vendor/github.com/golang/protobuf/proto/text_parser.go b/agent/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 00000000000..bb55a3af276
--- /dev/null
+++ b/agent/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,880 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
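+
+// For example, a parse failure at byte offset 7 of the first input line is
+// reported as "line 1.7: ...", while failures on later lines carry only the
+// line number, e.g. "line 3: ...".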
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ ss := string(r) + s[:2]
+ s = s[2:]
+ i, err := strconv.ParseUint(ss, 8, 8)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+ }
+ ss := s[:n]
+ s = s[n:]
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(i), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
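+
+// A few concrete cases of the rules above: `\101` (octal) and `\x41` (hex)
+// both decode to "A", `\u00e9` decodes to "é", and `\U` must be followed by
+// exactly 8 hex digits naming a valid Unicode code point.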
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
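+
+// Note that next() concatenates adjacent quoted strings: the input
+// `"foo" "bar"` yields a single token whose unquoted value is "foobar".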
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+	return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension or an Any.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ extName, err := p.consumeExtName()
+ if err != nil {
+ return err
+ }
+
+ if s := strings.LastIndex(extName, "/"); s >= 0 {
+ // If it contains a slash, it's an Any type URL.
+ messageName := extName[s+1:]
+ mt := MessageType(messageName)
+ if mt == nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+ }
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ v := reflect.New(mt.Elem())
+ if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+ return pe
+ }
+ b, err := Marshal(v.Interface().(Message))
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+ }
+ if fieldSet["type_url"] {
+ return p.errorf(anyRepeatedlyUnpacked, "type_url")
+ }
+ if fieldSet["value"] {
+ return p.errorf(anyRepeatedlyUnpacked, "value")
+ }
+ sv.FieldByName("TypeUrl").SetString(extName)
+ sv.FieldByName("Value").SetBytes(b)
+ fieldSet["type_url"] = true
+ fieldSet["value"] = true
+ continue
+ }
+
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == extName {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", extName)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(Message)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ field := sv.Field(oop.Field)
+ if !field.IsNil() {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+ }
+ field.Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order. See b/28924776 for a time
+ // this went wrong.
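+			// For example, for a map<string, int32> field named "counts",
+			// the text `counts: < key: "a" value: 1 >` produces one entry;
+			// '{' and '}' are accepted in place of '<' and '>'.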
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.MapKeyProp); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ case "value":
+ if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.MapValProp); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ default:
+ p.back()
+ return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
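+
+// For example, `[com.example.my_extension]` names a registered extension,
+// while `[type.googleapis.com/com.example.MyType]` (note the slash) is
+// treated by readStruct as an expanded google.protobuf.Any type URL.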
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // true/1/t/True or false/f/0/False.
+ switch tok.value {
+ case "true", "1", "t", "True":
+ fv.SetBool(true)
+ return nil
+ case "false", "0", "f", "False":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ return um.UnmarshalText([]byte(s))
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ return newTextParser(s).readStruct(v.Elem(), "")
+}
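+
+// A minimal usage sketch (MyMessage is again a hypothetical generated type
+// with an optional string field "name"):
+//
+//	m := &MyMessage{}
+//	if err := UnmarshalText(`name: "example"`, m); err != nil {
+//		// err is a *ParseError or, for missing required fields,
+//		// a *RequiredNotSetError.
+//	}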
diff --git a/agent/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/agent/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
new file mode 100644
index 00000000000..8dada3edaf5
--- /dev/null
+++ b/agent/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/agent/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/agent/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
new file mode 100644
index 00000000000..5d8cb5b72e7
--- /dev/null
+++ b/agent/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
@@ -0,0 +1 @@
+Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
diff --git a/agent/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/agent/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
new file mode 100644
index 00000000000..e16fb946bb9
--- /dev/null
+++ b/agent/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
@@ -0,0 +1 @@
+cover.dat
diff --git a/agent/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/agent/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
new file mode 100644
index 00000000000..81be214370d
--- /dev/null
+++ b/agent/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
@@ -0,0 +1,7 @@
+all:
+
+cover:
+ go test -cover -v -coverprofile=cover.dat ./...
+ go tool cover -func cover.dat
+
+.PHONY: cover
diff --git a/agent/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/agent/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
new file mode 100644
index 00000000000..258c0636aac
--- /dev/null
+++ b/agent/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+var errInvalidVarint = errors.New("invalid varint32 encountered")
+
+// ReadDelimited decodes a message from the provided length-delimited stream,
+// where the length is encoded as 32-bit varint prefix to the message body.
+// It returns the total number of bytes read and any applicable error. This is
+// roughly equivalent to the companion Java API's
+// MessageLite#parseDelimitedFrom. As per the reader contract, this function
+// calls r.Read repeatedly as required until exactly one message including its
+// prefix is read and decoded (or an error has occurred). The function never
+// reads more bytes from the stream than required. The function never returns
+// an error if a message has been read and decoded correctly, even if the end
+// of the stream has been reached in doing so. In that case, any subsequent
+// calls return (0, io.EOF).
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
+ // Per AbstractParser#parsePartialDelimitedFrom with
+ // CodedInputStream#readRawVarint32.
+ var headerBuf [binary.MaxVarintLen32]byte
+ var bytesRead, varIntBytes int
+ var messageLength uint64
+ for varIntBytes == 0 { // i.e. no varint has been decoded yet.
+ if bytesRead >= len(headerBuf) {
+ return bytesRead, errInvalidVarint
+ }
+ // We have to read byte by byte here to avoid reading more bytes
+ // than required. Each read byte is appended to what we have
+ // read before.
+ newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
+ if newBytesRead == 0 {
+ if err != nil {
+ return bytesRead, err
+ }
+ // A Reader should not return (0, nil), but if it does,
+ // it should be treated as no-op (according to the
+ // Reader contract). So let's go on...
+ continue
+ }
+ bytesRead += newBytesRead
+ // Now present everything read so far to the varint decoder and
+ // see if a varint can be decoded already.
+ messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+ }
+
+ messageBuf := make([]byte, messageLength)
+ newBytesRead, err := io.ReadFull(r, messageBuf)
+ bytesRead += newBytesRead
+ if err != nil {
+ return bytesRead, err
+ }
+
+ return bytesRead, proto.Unmarshal(messageBuf, m)
+}
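+
+// A minimal reading-loop sketch (newMessage and handle are hypothetical;
+// any concrete proto.Message works):
+//
+//	for {
+//		m := newMessage()
+//		if _, err := pbutil.ReadDelimited(r, m); err != nil {
+//			if err == io.EOF {
+//				break // clean end of stream
+//			}
+//			return err
+//		}
+//		handle(m)
+//	}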
diff --git a/agent/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/agent/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 00000000000..c318385cbed
--- /dev/null
+++ b/agent/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil
diff --git a/agent/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/agent/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 00000000000..8fb59ad226f
--- /dev/null
+++ b/agent/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain together
+// encoded messages of the same type in a file. It returns the total
+// number of bytes written and any applicable error. This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
+ buffer, err := proto.Marshal(m)
+ if err != nil {
+ return 0, err
+ }
+
+ var buf [binary.MaxVarintLen32]byte
+ encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
+
+ sync, err := w.Write(buf[:encodedLength])
+ if err != nil {
+ return sync, err
+ }
+
+ n, err = w.Write(buffer)
+ return n + sync, err
+}
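+
+// A round-trip sketch pairing WriteDelimited with ReadDelimited from
+// decode.go in this package (msg is a hypothetical proto.Message value):
+//
+//	var buf bytes.Buffer
+//	if _, err := pbutil.WriteDelimited(&buf, msg); err != nil {
+//		return err
+//	}
+//	out := &MyMessage{} // the same concrete type as msg
+//	if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
+//		return err
+//	}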
diff --git a/agent/vendor/github.com/prometheus/client_golang/LICENSE b/agent/vendor/github.com/prometheus/client_golang/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/agent/vendor/github.com/prometheus/client_golang/NOTICE b/agent/vendor/github.com/prometheus/client_golang/NOTICE
new file mode 100644
index 00000000000..dd878a30ee9
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/NOTICE
@@ -0,0 +1,23 @@
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
+
+Support for streaming Protocol Buffer messages for the Go language (golang).
+https://github.com/matttproud/golang_protobuf_extensions
+Copyright 2013 Matt T. Proud
+Licensed under the Apache License, Version 2.0
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/agent/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
new file mode 100644
index 00000000000..3460f0346d9
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
@@ -0,0 +1 @@
+command-line-arguments.test
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/README.md b/agent/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 00000000000..44986bff06b
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1 @@
+See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 00000000000..c0d70b2faf1
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,120 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Registerer.Register.
+//
+// The stock metrics provided by this package (Gauge, Counter, Summary,
+// Histogram, Untyped) are also Collectors (which only ever collect one metric,
+// namely itself). An implementer of Collector may, however, collect multiple
+// metrics in a coordinated fashion and/or create metrics on the fly. Examples
+// for collectors already implemented in this library are the metric vectors
+// (i.e. collection of multiple instances of the same Metric but with different
+// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+ // Describe sends the super-set of all possible descriptors of metrics
+ // collected by this Collector to the provided channel and returns once
+ // the last descriptor has been sent. The sent descriptors fulfill the
+ // consistency and uniqueness requirements described in the Desc
+ // documentation.
+ //
+ // It is valid if one and the same Collector sends duplicate
+ // descriptors. Those duplicates are simply ignored. However, two
+ // different Collectors must not send duplicate descriptors.
+ //
+ // Sending no descriptor at all marks the Collector as “unchecked”,
+ // i.e. no checks will be performed at registration time, and the
+ // Collector may yield any Metric it sees fit in its Collect method.
+ //
+ // This method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector. It may be called concurrently and
+ // therefore must be implemented in a concurrency safe way.
+ //
+ // If a Collector encounters an error while executing this method, it
+ // must send an invalid descriptor (created with NewInvalidDesc) to
+ // signal the error to the registry.
+ Describe(chan<- *Desc)
+ // Collect is called by the Prometheus registry when collecting
+ // metrics. The implementation sends each collected metric via the
+ // provided channel and returns once the last metric has been sent. The
+ // descriptor of each sent metric is one of those returned by Describe
+ // (unless the Collector is unchecked, see above). Returned metrics that
+ // share the same descriptor must differ in their variable label
+ // values.
+ //
+ // This method may be called concurrently and must therefore be
+ // implemented in a concurrency safe way. Blocking occurs at the expense
+ // of total performance of rendering all registered metrics. Ideally,
+ // Collector implementations support concurrent readers.
+ Collect(chan<- Metric)
+}
+
+// DescribeByCollect is a helper to implement the Describe method of a custom
+// Collector. It collects the metrics from the provided Collector and sends
+// their descriptors to the provided channel.
+//
+// If a Collector collects the same metrics throughout its lifetime, its
+// Describe method can simply be implemented as:
+//
+// func (c customCollector) Describe(ch chan<- *Desc) {
+// DescribeByCollect(c, ch)
+// }
+//
+// However, this will not work if the metrics collected change dynamically over
+// the lifetime of the Collector in a way that their combined set of descriptors
+// changes as well. The shortcut implementation will then violate the contract
+// of the Describe method. If a Collector sometimes collects no metrics at all
+// (for example vectors like CounterVec, GaugeVec, etc., which only collect
+// metrics after a metric with a fully specified label set has been accessed),
+// it might even get registered as an unchecked Collector (cf. the Register
+// method of the Registerer interface). Hence, only use this shortcut
+// implementation of Describe if you are certain to fulfill the contract.
+//
+// The Collector example demonstrates a use of DescribeByCollect.
+func DescribeByCollect(c Collector, descs chan<- *Desc) {
+ metrics := make(chan Metric)
+ go func() {
+ c.Collect(metrics)
+ close(metrics)
+ }()
+ for m := range metrics {
+ descs <- m.Desc()
+ }
+}
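+
+// As a sketch of the pattern above (cacheSize stands for any hypothetical,
+// concurrency-safe function producing the value to mirror), a Collector
+// using DescribeByCollect could look like:
+//
+//	type cacheCollector struct{ sizeDesc *Desc }
+//
+//	func (c cacheCollector) Describe(ch chan<- *Desc) {
+//		DescribeByCollect(c, ch)
+//	}
+//
+//	func (c cacheCollector) Collect(ch chan<- Metric) {
+//		ch <- MustNewConstMetric(c.sizeDesc, GaugeValue, cacheSize())
+//	}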
+
+// selfCollector implements Collector for a single Metric so that the Metric
+// collects itself. Add it as an anonymous field to a struct that implements
+// Metric, and call init with the Metric itself as an argument.
+type selfCollector struct {
+ self Metric
+}
+
+// init provides the selfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *selfCollector) init(self Metric) {
+ c.self = self
+}
+
+// Describe implements Collector.
+func (c *selfCollector) Describe(ch chan<- *Desc) {
+ ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *selfCollector) Collect(ch chan<- Metric) {
+ ch <- c.self
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 00000000000..765e4550c66
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,277 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "math"
+ "sync/atomic"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+ Metric
+ Collector
+
+ // Inc increments the counter by 1. Use Add to increment it by arbitrary
+ // non-negative values.
+ Inc()
+ // Add adds the given value to the counter. It panics if the value is <
+ // 0.
+ Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+//
+// The returned implementation tracks the counter value in two separate
+// variables, a float64 and a uint64. The latter is used to track calls of the
+// Inc method and calls of the Add method with a value that can be represented
+// as a uint64. This allows atomic increments of the counter with optimal
+// performance. (It is common to have an Inc call in very hot execution paths.)
+// Both internal tracking values are added up in the Write method. This has to
+// be taken into account when it comes to precision and overflow behavior.
+func NewCounter(opts CounterOpts) Counter {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
+ result.init(result) // Init self-collection.
+ return result
+}
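+
+// A short usage sketch (metric name invented for illustration):
+//
+//	requests := NewCounter(CounterOpts{
+//		Name: "myapp_requests_total",
+//		Help: "Total number of handled requests.",
+//	})
+//	MustRegister(requests)
+//	requests.Inc()    // count a single request
+//	requests.Add(3.5) // non-negative increments only; negative values panic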
+
+type counter struct {
+ // valBits contains the bits of the represented float64 value, while
+ // valInt stores values that are exact integers. Both have to go first
+ // in the struct to guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+ valInt uint64
+
+ selfCollector
+ desc *Desc
+
+ labelPairs []*dto.LabelPair
+}
+
+func (c *counter) Desc() *Desc {
+ return c.desc
+}
+
+func (c *counter) Add(v float64) {
+ if v < 0 {
+ panic(errors.New("counter cannot decrease in value"))
+ }
+ ival := uint64(v)
+ if float64(ival) == v {
+ atomic.AddUint64(&c.valInt, ival)
+ return
+ }
+
+ for {
+ oldBits := atomic.LoadUint64(&c.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (c *counter) Inc() {
+ atomic.AddUint64(&c.valInt, 1)
+}
+
+func (c *counter) Write(out *dto.Metric) error {
+ fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
+ ival := atomic.LoadUint64(&c.valInt)
+ val := fval + float64(ival)
+
+ return populateMetric(CounterValue, val, c.labelPairs, out)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+type CounterVec struct {
+ *metricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &CounterVec{
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ if len(lvs) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
+ result.init(result) // Init self-collection.
+ return result
+ }),
+ }
+}
+
+// GetMetricWithLabelValues returns the Counter for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Counter is created.
+//
+// It is possible to call this method without using the returned Counter to only
+// create the new Counter but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Counter for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Counter from the CounterVec. In that case,
+// the Counter will still exist, but it will not be exported anymore, even if a
+// Counter with the same label values is created later.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// GetMetricWith returns the Counter for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Counter is created. Implications of
+// creating a Counter without using it and keeping the Counter for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
+ c, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return c
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *CounterVec) With(labels Labels) Counter {
+ c, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return c
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the CounterVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &CounterVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
+}
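+
+// For illustration (metric and label names invented), currying pins one
+// label so that call sites only supply the remaining ones:
+//
+//	httpReqs := NewCounterVec(
+//		CounterOpts{Name: "http_requests_total", Help: "Requests by handler and code."},
+//		[]string{"handler", "code"},
+//	)
+//	apiReqs := httpReqs.MustCurryWith(Labels{"handler": "api"})
+//	// Equivalent to httpReqs.WithLabelValues("api", "200").Inc():
+//	apiReqs.WithLabelValues("200").Inc()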
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+ Metric
+ Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), CounterValue, function)
+}
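+
+// A sketch of typical use (readHandledTotal stands for a hypothetical,
+// concurrency-safe function exposing an externally maintained count):
+//
+//	cf := NewCounterFunc(CounterOpts{
+//		Name: "myapp_handled_total",
+//		Help: "Handled events as counted by an external event loop.",
+//	}, readHandledTotal)
+//	MustRegister(cf)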
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 00000000000..7b8827ffbca
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,184 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each, constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+ // fqName has been built from Namespace, Subsystem, and Name.
+ fqName string
+ // help provides some helpful information about this metric.
+ help string
+ // constLabelPairs contains precalculated DTO label pairs based on
+ // the constant labels.
+ constLabelPairs []*dto.LabelPair
+ // VariableLabels contains names of labels for which the metric
+ // maintains variable values.
+ variableLabels []string
+ // id is a hash of the values of the ConstLabels and fqName. This
+ // must be unique among all registered descriptors and can therefore be
+ // used as an identifier of the descriptor.
+ id uint64
+ // dimHash is a hash of the label names (preset and variable) and the
+ // Help string. Each Desc with the same fqName must have the same
+ // dimHash.
+ dimHash uint64
+	// err is an error that occurred during construction. It is reported at
+	// registration time.
+ err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported at registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ d := &Desc{
+ fqName: fqName,
+ help: help,
+ variableLabels: variableLabels,
+ }
+ if !model.IsValidMetricName(model.LabelValue(fqName)) {
+ d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+ return d
+ }
+ // labelValues contains the label values of const labels (in order of
+ // their sorted label names) plus the fqName (at position 0).
+ labelValues := make([]string, 1, len(constLabels)+1)
+ labelValues[0] = fqName
+ labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNameSet := map[string]struct{}{}
+ // First add only the const label names and sort them...
+ for labelName := range constLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ sort.Strings(labelNames)
+ // ... so that we can now add const label values in the order of their names.
+ for _, labelName := range labelNames {
+ labelValues = append(labelValues, constLabels[labelName])
+ }
+ // Validate the const label values. They can't have a wrong cardinality, so
+	// use len(labelValues) as expectedNumberOfValues.
+ if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
+ d.err = err
+ return d
+ }
+ // Now add the variable label names, but prefix them with something that
+ // cannot be in a regular label name. That prevents matching the label
+ // dimension with a different mix between preset and variable labels.
+ for _, labelName := range variableLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, "$"+labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ if len(labelNames) != len(labelNameSet) {
+ d.err = errors.New("duplicate label names")
+ return d
+ }
+
+ vh := hashNew()
+ for _, val := range labelValues {
+ vh = hashAdd(vh, val)
+ vh = hashAddByte(vh, separatorByte)
+ }
+ d.id = vh
+ // Sort labelNames so that order doesn't matter for the hash.
+ sort.Strings(labelNames)
+ // Now hash together (in this order) the help string and the sorted
+ // label names.
+ lh := hashNew()
+ lh = hashAdd(lh, help)
+ lh = hashAddByte(lh, separatorByte)
+ for _, labelName := range labelNames {
+ lh = hashAdd(lh, labelName)
+ lh = hashAddByte(lh, separatorByte)
+ }
+ d.dimHash = lh
+
+ d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+ for n, v := range constLabels {
+ d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(v),
+ })
+ }
+ sort.Sort(labelPairSorter(d.constLabelPairs))
+ return d
+}
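+
+// As an illustration (names invented), a Desc with one constant and one
+// variable label:
+//
+//	desc := NewDesc(
+//		"myapp_worker_queue_length",
+//		"Current length of a worker queue.",
+//		[]string{"queue"},    // variable label, value supplied per metric
+//		Labels{"shard": "0"}, // constant label, fixed in the Desc
+//	)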
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+ return &Desc{
+ err: err,
+ }
+}
+
+func (d *Desc) String() string {
+ lpStrings := make([]string, 0, len(d.constLabelPairs))
+ for _, lp := range d.constLabelPairs {
+ lpStrings = append(
+ lpStrings,
+ fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+ )
+ }
+ return fmt.Sprintf(
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ d.fqName,
+ d.help,
+ strings.Join(lpStrings, ","),
+ d.variableLabels,
+ )
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 00000000000..5d9525defc8
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,201 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus is the core instrumentation package. It provides metrics
+// primitives to instrument code for monitoring. It also offers a registry for
+// metrics. Sub-packages allow exposing the registered metrics via HTTP
+// (package promhttp) or push them to a Pushgateway (package push). There is
+// also a sub-package promauto, which provides metrics constructors with
+// automatic registration.
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// A Basic Example
+//
+// As a starting point, a very basic usage example:
+//
+// package main
+//
+// import (
+// "log"
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+// )
+//
+// var (
+// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// })
+// hdFailures = prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// },
+// []string{"device"},
+// )
+// )
+//
+// func init() {
+// // Metrics have to be registered to be exposed:
+// prometheus.MustRegister(cpuTemp)
+// prometheus.MustRegister(hdFailures)
+// }
+//
+// func main() {
+// cpuTemp.Set(65.3)
+// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+// // The Handler function provides a default handler to expose metrics
+// // via an HTTP server. "/metrics" is the usual endpoint for that.
+// http.Handle("/metrics", promhttp.Handler())
+// log.Fatal(http.ListenAndServe(":8080", nil))
+// }
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+//
+// Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage. Furthermore, if you are not concerned with
+// fine-grained control of when and how to register metrics with the registry,
+// have a look at the promauto package, which will effectively allow you to
+// ignore registration altogether in simple cases.
+//
+// Above, you have already touched the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. A more thorough description
+// of those four metric types can be found in the Prometheus docs:
+// https://prometheus.io/docs/concepts/metric_types/
+//
+// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
+// Prometheus server not to assume anything about its type.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary,
+// Histogram, and Untyped, a very important part of the Prometheus data model is
+// the partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// HistogramVec, and UntypedVec.
+//
+// While only the fundamental metric types implement the Metric interface, both
+// the metrics and their vector versions implement the Collector interface. A
+// Collector manages the collection of a number of Metrics, but for convenience,
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
+// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
+// SummaryVec, HistogramVec, and UntypedVec are not.
+//
+// To create instances of Metrics and their vector versions, you need a suitable
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
+// UntypedOpts.
+//
+// Custom Collectors and constant Metrics
+//
+// While you could create your own implementations of Metric, most likely you
+// will only ever implement the Collector interface on your own. At a first
+// glance, a custom Collector seems handy to bundle Metrics for common
+// registration (with the prime example of the different metric vectors above,
+// which bundle all the metrics of the same name but with different labels).
+//
+// There is a more involved use case, too: If you already have metrics
+// available, created outside of the Prometheus context, you don't need the
+// interface of the various Metric types. You essentially want to mirror the
+// existing numbers into Prometheus Metrics during collection. Your own
+// implementation of the Collector interface is perfect for that. You can create
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
+// NewConstSummary (and their respective Must… versions). That will happen in
+// the Collect method. The Describe method has to return separate Desc
+// instances, representative of the “throw-away” metrics to be created later.
+// NewDesc comes in handy to create those Desc instances. Alternatively, you
+// could return no Desc at all, which will mark the Collector “unchecked”. No
+// checks are performed at registration time, but metric consistency will still
+// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
+// errors. Thus, with unchecked Collectors, the responsibility to not collect
+// metrics that lead to inconsistencies in the total scrape result lies with the
+// implementer of the Collector. While this is not a desirable state, it is
+// sometimes necessary. The typical use case is a situation where the exact
+// metrics to be returned by a Collector cannot be predicted at registration
+// time, but the implementer has sufficient knowledge of the whole system to
+// guarantee metric consistency.
+//
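+// As a compact sketch of that pattern (queueLength stands for any
+// hypothetical, concurrency-safe function returning an externally
+// maintained value):
+//
+//	type queueCollector struct{ desc *prometheus.Desc }
+//
+//	func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
+//		ch <- c.desc
+//	}
+//
+//	func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
+//		ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, queueLength())
+//	}
+//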
+// The Collector example illustrates the use case. You can also look at the
+// source code of the processCollector (mirroring process metrics), the
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
+// metrics) as examples that are used in this package itself.
+//
+// If you just need to call a function to get a single float value to collect as
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
+// shortcuts.
+//
+// Advanced Uses of the Registry
+//
+// While MustRegister is the by far most common way of registering a Collector,
+// sometimes you might want to handle the errors the registration might cause.
+// As suggested by the name, MustRegister panics if an error occurs. With the
+// Register function, the error is returned and can be handled.
+//
+// An error is returned if the registered Collector is incompatible or
+// inconsistent with already registered metrics. The registry aims for
+// consistency of the collected metrics according to the Prometheus data model.
+// Inconsistencies are ideally detected at registration time, not at collect
+// time. The former will usually be detected at start-up time of a program,
+// while the latter will only happen at scrape time, possibly not even on the
+// first scrape if the inconsistency only becomes relevant later. That is the
+// main reason why a Collector and a Metric have to describe themselves to the
+// registry.
+//
+// So far, everything we did operated on the so-called default registry, as it
+// can be found in the global DefaultRegisterer variable. With NewRegistry, you
+// can create a custom registry, or you can even implement the Registerer or
+// Gatherer interfaces yourself. The methods Register and Unregister work in the
+// same way on a custom registry as the global functions Register and Unregister
+// on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries with
+// special properties, see NewPedanticRegistry. You can avoid global state, as
+// it is imposed by the DefaultRegisterer. You can use multiple registries at
+// the same time to expose different metrics in different ways. You can use
+// separate registries for testing purposes.
+//
+// Also note that the DefaultRegisterer comes registered with a Collector for Go
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
+// NewProcessCollector). With a custom registry, you are in control and decide
+// yourself about the Collectors to register.
+//
+// HTTP Exposition
+//
+// The Registry implements the Gatherer interface. The caller of the Gather
+// method can then expose the gathered metrics in some way. Usually, the metrics
+// are served via HTTP on the /metrics endpoint. That's happening in the example
+// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
+// (The top-level functions in the prometheus package are deprecated.)
+//
+// Pushing to the Pushgateway
+//
+// Functions for pushing to the Pushgateway can be found in the push sub-package.
+//
+// Graphite Bridge
+//
+// Functions and examples to push metrics from a Gatherer to Graphite can be
+// found in the graphite sub-package.
+//
+// Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added by following the approaches
+// of the existing implementations.
+package prometheus
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
new file mode 100644
index 00000000000..18a99d5faaa
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "expvar"
+)
+
+type expvarCollector struct {
+ exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+ return &expvarCollector{
+ exports: exports,
+ }
+}
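+
+// A sketch of an exports map ("open_connections" is a hypothetical numeric
+// expvar key):
+//
+//	c := NewExpvarCollector(map[string]*Desc{
+//		"open_connections": NewDesc(
+//			"expvar_open_connections",
+//			"Open connections as exported via expvar.",
+//			nil, nil,
+//		),
+//	})
+//	MustRegister(c)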
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+ for _, desc := range e.exports {
+ ch <- desc
+ }
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+ for name, desc := range e.exports {
+ var m Metric
+ expVar := expvar.Get(name)
+ if expVar == nil {
+ continue
+ }
+ var v interface{}
+ labels := make([]string, len(desc.variableLabels))
+ if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+ ch <- NewInvalidMetric(desc, err)
+ continue
+ }
+ var processValue func(v interface{}, i int)
+ processValue = func(v interface{}, i int) {
+ if i >= len(labels) {
+ copiedLabels := append(make([]string, 0, len(labels)), labels...)
+ switch v := v.(type) {
+ case float64:
+ m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+ case bool:
+ if v {
+ m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+ } else {
+ m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+ }
+ default:
+ return
+ }
+ ch <- m
+ return
+ }
+ vm, ok := v.(map[string]interface{})
+ if !ok {
+ return
+ }
+ for lv, val := range vm {
+ labels[i] = lv
+ processValue(val, i+1)
+ }
+ }
+ processValue(v, 0)
+ }
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 00000000000..3d383a735c3
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
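+
+// For illustration, hashing a sequence of strings the way Desc does
+// (separatorByte is defined elsewhere in this package):
+//
+//	h := hashNew()
+//	for _, s := range []string{"name", "value"} {
+//		h = hashAdd(h, s)
+//		h = hashAddByte(h, separatorByte)
+//	}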
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 00000000000..17c72d7eb0c
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,286 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "math"
+ "sync/atomic"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+ Metric
+ Collector
+
+ // Set sets the Gauge to an arbitrary value.
+ Set(float64)
+ // Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+ // values.
+ Inc()
+ // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+ // values.
+ Dec()
+ // Add adds the given value to the Gauge. (The value can be negative,
+ // resulting in a decrease of the Gauge.)
+ Add(float64)
+ // Sub subtracts the given value from the Gauge. (The value can be
+ // negative, resulting in an increase of the Gauge.)
+ Sub(float64)
+
+ // SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+ SetToCurrentTime()
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+//
+// The returned implementation is optimized for a fast Set method. If you have a
+// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
+// the former. For example, the Inc method of the returned Gauge is slower than
+// the Inc method of a Counter returned by NewCounter. This matches the typical
+// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
+// the latter Inc-heavy.
+func NewGauge(opts GaugeOpts) Gauge {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
+ result.init(result) // Init self-collection.
+ return result
+}
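+
+// A short usage sketch (metric name invented for illustration):
+//
+//	queueDepth := NewGauge(GaugeOpts{
+//		Name: "myapp_queue_depth",
+//		Help: "Current number of queued items.",
+//	})
+//	MustRegister(queueDepth)
+//	queueDepth.Set(12) // typically the cheapest way to update a Gauge
+//	queueDepth.Inc()
+//	queueDepth.Sub(3)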
+
+type gauge struct {
+ // valBits contains the bits of the represented float64 value. It has
+ // to go first in the struct to guarantee alignment for atomic
+ // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+
+ selfCollector
+
+ desc *Desc
+ labelPairs []*dto.LabelPair
+}
+
+func (g *gauge) Desc() *Desc {
+ return g.desc
+}
+
+func (g *gauge) Set(val float64) {
+ atomic.StoreUint64(&g.valBits, math.Float64bits(val))
+}
+
+func (g *gauge) SetToCurrentTime() {
+ g.Set(float64(time.Now().UnixNano()) / 1e9)
+}
+
+func (g *gauge) Inc() {
+ g.Add(1)
+}
+
+func (g *gauge) Dec() {
+ g.Add(-1)
+}
+
+func (g *gauge) Add(val float64) {
+ for {
+ oldBits := atomic.LoadUint64(&g.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+ if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (g *gauge) Sub(val float64) {
+ g.Add(val * -1)
+}
+
+func (g *gauge) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
+ return populateMetric(GaugeValue, val, g.labelPairs, out)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+ *metricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &GaugeVec{
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ if len(lvs) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
+ result.init(result) // Init self-collection.
+ return result
+ }),
+ }
+}
+
+// GetMetricWithLabelValues returns the Gauge for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Gauge is created.
+//
+// It is possible to call this method without using the returned Gauge to only
+// create the new Gauge but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Gauge for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
+// Gauge will still exist, but it will not be exported anymore, even if a
+// Gauge with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// GetMetricWith returns the Gauge for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Gauge is created. Implications of
+// creating a Gauge without using it and keeping the Gauge for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ g, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return g
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *GaugeVec) With(labels Labels) Gauge {
+ g, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return g
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the GaugeVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &GaugeVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+ Metric
+ Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, function)
+}
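+
+// A sketch of typical use (runtime.NumCPU is merely a convenient stand-in
+// for any concurrency-safe function):
+//
+//	gf := NewGaugeFunc(GaugeOpts{
+//		Name: "myapp_num_cpu",
+//		Help: "Number of usable CPUs.",
+//	}, func() float64 { return float64(runtime.NumCPU()) })
+//	MustRegister(gf)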
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 00000000000..ba3b9333edd
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,301 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "time"
+)
+
+type goCollector struct {
+ goroutinesDesc *Desc
+ threadsDesc *Desc
+ gcDesc *Desc
+ goInfoDesc *Desc
+
+ // metrics to describe and collect
+ metrics memStatsMetrics
+}
+
+// NewGoCollector returns a collector which exports metrics about the current Go
+// process. This includes memory stats. To collect those, runtime.ReadMemStats
+// is called. This causes a stop-the-world, which is very short with Go1.9+
+// (~25µs). However, with older Go versions, the stop-the-world duration depends
+// on the heap size and can be quite significant (~1.7 ms/GiB as per
+// https://go-review.googlesource.com/c/go/+/34937).
+func NewGoCollector() Collector {
+ return &goCollector{
+ goroutinesDesc: NewDesc(
+ "go_goroutines",
+ "Number of goroutines that currently exist.",
+ nil, nil),
+ threadsDesc: NewDesc(
+ "go_threads",
+ "Number of OS threads created.",
+ nil, nil),
+ gcDesc: NewDesc(
+ "go_gc_duration_seconds",
+ "A summary of the GC invocation durations.",
+ nil, nil),
+ goInfoDesc: NewDesc(
+ "go_info",
+ "Information about the Go environment.",
+ nil, Labels{"version": runtime.Version()}),
+ metrics: memStatsMetrics{
+ {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes"),
+ "Number of bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes_total"),
+ "Total number of bytes allocated, even if freed.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("sys_bytes"),
+ "Number of bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("lookups_total"),
+ "Total number of pointer lookups.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mallocs_total"),
+ "Total number of mallocs.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("frees_total"),
+ "Total number of frees.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_alloc_bytes"),
+ "Number of heap bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_sys_bytes"),
+ "Number of heap bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_idle_bytes"),
+ "Number of heap bytes waiting to be used.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_inuse_bytes"),
+ "Number of heap bytes that are in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_released_bytes"),
+ "Number of heap bytes released to OS.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_objects"),
+ "Number of allocated objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_inuse_bytes"),
+ "Number of bytes in use by the stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_sys_bytes"),
+ "Number of bytes obtained from system for stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_inuse_bytes"),
+ "Number of bytes in use by mspan structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_sys_bytes"),
+ "Number of bytes used for mspan structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_inuse_bytes"),
+ "Number of bytes in use by mcache structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_sys_bytes"),
+ "Number of bytes used for mcache structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("buck_hash_sys_bytes"),
+ "Number of bytes used by the profiling bucket hash table.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_sys_bytes"),
+ "Number of bytes used for garbage collection system metadata.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("other_sys_bytes"),
+ "Number of bytes used for other system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("next_gc_bytes"),
+ "Number of heap bytes when next garbage collection will take place.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("last_gc_time_seconds"),
+ "Number of seconds since 1970 of last garbage collection.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_cpu_fraction"),
+ "The fraction of this program's available CPU time used by the GC since the program started.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+ valType: GaugeValue,
+ },
+ },
+ }
+}
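+
+// A sketch of use with a custom registry (the default registry already has
+// this collector registered):
+//
+//	reg := NewRegistry()
+//	reg.MustRegister(NewGoCollector())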
+
+func memstatNamespace(s string) string {
+ return fmt.Sprintf("go_memstats_%s", s)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ ch <- c.goroutinesDesc
+ ch <- c.threadsDesc
+ ch <- c.gcDesc
+ ch <- c.goInfoDesc
+ for _, i := range c.metrics {
+ ch <- i.desc
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
+ n, _ := runtime.ThreadCreateProfile(nil)
+ ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
+
+ var stats debug.GCStats
+ stats.PauseQuantiles = make([]time.Duration, 5)
+ debug.ReadGCStats(&stats)
+
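+	// debug.ReadGCStats fills PauseQuantiles (here of length 5) with the
+	// minimum, the 25th/50th/75th percentiles, and the maximum pause, which
+	// are mapped below to the 0, 0.25, 0.5, 0.75, and 1 quantiles of the
+	// summary.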
+ quantiles := make(map[float64]float64)
+ for idx, pq := range stats.PauseQuantiles[1:] {
+ quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+ }
+ quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+ ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+
+ ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
+
+ ms := &runtime.MemStats{}
+ runtime.ReadMemStats(ms)
+ for _, i := range c.metrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+ }
+}
+
+// memStatsMetrics provide description, value, and value type for memstat metrics.
+type memStatsMetrics []struct {
+ desc *Desc
+ eval func(*runtime.MemStats) float64
+ valType ValueType
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 00000000000..4d7fa976e47
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,614 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+ "sort"
+ "sync"
+ "sync/atomic"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable buckets. Similar to a summary, it also provides a sum of
+// observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile function in the query language.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated with the
+// Prometheus query language (see the documentation for detailed
+// procedures). However, Histograms require the user to pre-define suitable
+// buckets, and they are in general less accurate. The Observe method of a
+// Histogram has a very low performance overhead in comparison with the Observe
+// method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
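+//
+// A minimal usage sketch (illustrative only; the metric name below is an
+// assumption, not something defined by this package):
+//
+//	reqDur := NewHistogram(HistogramOpts{
+//		Name:    "http_request_duration_seconds",
+//		Help:    "HTTP request latency in seconds.",
+//		Buckets: DefBuckets,
+//	})
+//	reqDur.Observe(0.42)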
+type Histogram interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the histogram.
+ Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+// DefBuckets are the default Histogram buckets. The default buckets are
+// tailored to broadly measure the response time (in seconds) of a network
+// service. Most likely, however, you will be required to define buckets
+// customized to your use case.
+var (
+ DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+ errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
+ )
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
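+//
+// For example, LinearBuckets(1, 2, 4) returns []float64{1, 3, 5, 7}.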
+func LinearBuckets(start, width float64, count int) []float64 {
+ if count < 1 {
+ panic("LinearBuckets needs a positive count")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start += width
+ }
+ return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
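+//
+// For example, ExponentialBuckets(1, 2, 5) returns []float64{1, 2, 4, 8, 16}.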
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBuckets needs a positive count")
+ }
+ if start <= 0 {
+ panic("ExponentialBuckets needs a positive start value")
+ }
+ if factor <= 1 {
+ panic("ExponentialBuckets needs a factor greater than 1")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start *= factor
+ }
+ return buckets
+}
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name to a non-empty string. All other fields are optional
+// and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type HistogramOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Histogram (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Histogram must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Histogram.
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+ ConstLabels Labels
+
+ // Buckets defines the buckets into which observations are counted. Each
+ // element in the slice is the upper inclusive bound of a bucket. The
+ // values must be sorted in strictly increasing order. There is no need
+ // to add a highest bucket with +Inf bound, it will be added
+ // implicitly. The default value is DefBuckets.
+ Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+func NewHistogram(opts HistogramOpts) Histogram {
+ return newHistogram(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Buckets) == 0 {
+ opts.Buckets = DefBuckets
+ }
+
+ h := &histogram{
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
+ }
+ for i, upperBound := range h.upperBounds {
+ if i < len(h.upperBounds)-1 {
+ if upperBound >= h.upperBounds[i+1] {
+ panic(fmt.Errorf(
+ "histogram buckets must be in increasing order: %f >= %f",
+ upperBound, h.upperBounds[i+1],
+ ))
+ }
+ } else {
+ if math.IsInf(upperBound, +1) {
+ // The +Inf bucket is implicit. Remove it here.
+ h.upperBounds = h.upperBounds[:i]
+ }
+ }
+ }
+ // Finally we know the final length of h.upperBounds and can make counts
+ // for both states:
+ h.counts[0].buckets = make([]uint64, len(h.upperBounds))
+ h.counts[1].buckets = make([]uint64, len(h.upperBounds))
+
+ h.init(h) // Init self-collection.
+ return h
+}
+
+type histogramCounts struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+ buckets []uint64
+}
+
+type histogram struct {
+ // countAndHotIdx is a complicated one. For lock-free yet atomic
+ // observations, we need to save the total count of observations again,
+ // combined with the index of the currently-hot counts struct, so that
+ // we can perform the operation on both values atomically. The least
+ // significant bit defines the hot counts struct. The remaining 63 bits
+ // represent the total count of observations. This happens under the
+ // assumption that the 63bit count will never overflow. Rationale: An
+ // observations takes about 30ns. Let's assume it could happen in
+ // 10ns. Overflowing the counter will then take at least (2^63)*10ns,
+ // which is about 3000 years.
+ //
+ // This has to be first in the struct for 64bit alignment. See
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ countAndHotIdx uint64
+
+ selfCollector
+ desc *Desc
+ writeMtx sync.Mutex // Only used in the Write method.
+
+ upperBounds []float64
+
+ // Two counts, one is "hot" for lock-free observations, the other is
+ // "cold" for writing out a dto.Metric. It has to be an array of
+ // pointers to guarantee 64bit alignment of the histogramCounts, see
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+ counts [2]*histogramCounts
+ hotIdx int // Index of currently-hot counts. Only used within Write.
+
+ labelPairs []*dto.LabelPair
+}
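+
+// The packing in countAndHotIdx, sketched (this snippet is illustrative and
+// not part of the original source):
+//
+//	n := atomic.AddUint64(&h.countAndHotIdx, 2) // adds 1 to the upper 63 bits
+//	count := n >> 1                             // total observations so far
+//	hot := h.counts[n%2]                        // the LSB picks the hot counts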
+
+func (h *histogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+ // TODO(beorn7): For small numbers of buckets (<30), a linear search is
+ // slightly faster than the binary search. If we really care, we could
+ // switch from one search strategy to the other depending on the number
+ // of buckets.
+ //
+ // Microbenchmarks (BenchmarkHistogramNoLabels):
+ // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+ // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+ // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ i := sort.SearchFloat64s(h.upperBounds, v)
+
+ // We increment h.countAndHotIdx by 2 so that the counter in the upper
+ // 63 bits gets incremented by 1. At the same time, we get the new value
+ // back, which we can use to find the currently-hot counts.
+ n := atomic.AddUint64(&h.countAndHotIdx, 2)
+ hotCounts := h.counts[n%2]
+
+ if i < len(h.upperBounds) {
+ atomic.AddUint64(&hotCounts.buckets[i], 1)
+ }
+ for {
+ oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+ // Increment count last as we take it as a signal that the observation
+ // is complete.
+ atomic.AddUint64(&hotCounts.count, 1)
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+ var (
+ his = &dto.Histogram{}
+ buckets = make([]*dto.Bucket, len(h.upperBounds))
+ hotCounts, coldCounts *histogramCounts
+ count uint64
+ )
+
+	// For simplicity, we protect the rest of this method with a mutex. It
+	// is not in the hot path, i.e. Observe is called much more often than
+	// Write. The complication of making Write lock-free isn't worth it.
+ h.writeMtx.Lock()
+ defer h.writeMtx.Unlock()
+
+ // This is a bit arcane, which is why the following spells out this if
+ // clause in English:
+ //
+ // If the currently-hot counts struct is #0, we atomically increment
+ // h.countAndHotIdx by 1 so that from now on Observe will use the counts
+ // struct #1. Furthermore, the atomic increment gives us the new value,
+ // which, in its most significant 63 bits, tells us the count of
+ // observations done so far up to and including currently ongoing
+ // observations still using the counts struct just changed from hot to
+ // cold. To have a normal uint64 for the count, we bitshift by 1 and
+ // save the result in count. We also set h.hotIdx to 1 for the next
+ // Write call, and we will refer to counts #1 as hotCounts and to counts
+ // #0 as coldCounts.
+ //
+ // If the currently-hot counts struct is #1, we do the corresponding
+ // things the other way round. We have to _decrement_ h.countAndHotIdx
+ // (which is a bit arcane in itself, as we have to express -1 with an
+ // unsigned int...).
+ if h.hotIdx == 0 {
+ count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
+ h.hotIdx = 1
+ hotCounts = h.counts[1]
+ coldCounts = h.counts[0]
+ } else {
+ count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
+ h.hotIdx = 0
+ hotCounts = h.counts[0]
+ coldCounts = h.counts[1]
+ }
+
+ // Now we have to wait for the now-declared-cold counts to actually cool
+ // down, i.e. wait for all observations still using it to finish. That's
+ // the case once the count in the cold counts struct is the same as the
+ // one atomically retrieved from the upper 63bits of h.countAndHotIdx.
+ for {
+ if count == atomic.LoadUint64(&coldCounts.count) {
+ break
+ }
+ runtime.Gosched() // Let observations get work done.
+ }
+
+ his.SampleCount = proto.Uint64(count)
+ his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
+ var cumCount uint64
+ for i, upperBound := range h.upperBounds {
+ cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
+ buckets[i] = &dto.Bucket{
+ CumulativeCount: proto.Uint64(cumCount),
+ UpperBound: proto.Float64(upperBound),
+ }
+ }
+
+ his.Bucket = buckets
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ // Finally add all the cold counts to the new hot counts and reset the cold counts.
+ atomic.AddUint64(&hotCounts.count, count)
+ atomic.StoreUint64(&coldCounts.count, 0)
+ for {
+ oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
+ if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+ atomic.StoreUint64(&coldCounts.sumBits, 0)
+ break
+ }
+ }
+ for i := range h.upperBounds {
+ atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
+ atomic.StoreUint64(&coldCounts.buckets[i], 0)
+ }
+ return nil
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+ *metricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names.
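+//
+// A hypothetical usage sketch (metric and label names are illustrative):
+//
+//	latency := NewHistogramVec(HistogramOpts{
+//		Name: "request_latency_seconds",
+//		Help: "Request latency partitioned by method and code.",
+//	}, []string{"method", "code"})
+//	latency.WithLabelValues("GET", "200").Observe(0.073)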
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &HistogramVec{
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newHistogram(desc, opts, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues returns the Histogram for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Histogram is created.
+//
+// It is possible to call this method without using the returned Histogram to only
+// create the new Histogram but leave it at its starting value, a Histogram without
+// any observations.
+//
+// Keeping the Histogram for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
+// Histogram will still exist, but it will not be exported anymore, even if a
+// Histogram with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// GetMetricWith returns the Histogram for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Histogram is created. Implications of
+// creating a Histogram without using it and keeping the Histogram for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
+ h, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return h
+}
+
+// With works as GetMetricWith but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *HistogramVec) With(labels Labels) Observer {
+ h, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return h
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the HistogramVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &HistogramVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
+}
+
+type constHistogram struct {
+ desc *Desc
+ count uint64
+ sum float64
+ buckets map[float64]uint64
+ labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+ his.SampleCount = proto.Uint64(h.count)
+ his.SampleSum = proto.Float64(h.sum)
+
+ for upperBound, count := range h.buckets {
+ buckets = append(buckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+
+ if len(buckets) > 0 {
+ sort.Sort(buckSort(buckets))
+ }
+ his.Bucket = buckets
+
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc or if Desc is invalid.
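+//
+// A sketch of typical use inside a custom Collector's Collect method (desc
+// and the numbers here are assumptions for illustration):
+//
+//	ch <- MustNewConstHistogram(desc, 42, 12.5,
+//		map[float64]uint64{0.5: 30, 1: 40},
+//	)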
+func NewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ return nil, err
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+ return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+ return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/http.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 00000000000..4b8e6027335
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,505 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/prometheus/common/expfmt"
+)
+
+// TODO(beorn7): Remove this whole file. It is a partial mirror of
+// promhttp/http.go (to avoid circular import chains) where everything HTTP
+// related should live. The functions here are just for avoiding
+// breakage. Everything is deprecated.
+
+const (
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+)
+
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+ buf := bufPool.Get()
+ if buf == nil {
+ return &bytes.Buffer{}
+ }
+ return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the DefaultGatherer. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name).
+//
+// Deprecated: Please note the issues described in the doc comment of
+// InstrumentHandler. You might want to consider using promhttp.Handler instead.
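+//
+// For example, with the recommended replacement:
+//
+//	http.Handle("/metrics", promhttp.Handler())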
+func Handler() http.Handler {
+ return InstrumentHandler("prometheus", UninstrumentedHandler())
+}
+
+// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
+//
+// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{})
+// instead. See there for further documentation.
+func UninstrumentedHandler() http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ mfs, err := DefaultGatherer.Gather()
+ if err != nil {
+ http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ buf := getBuf()
+ defer giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf)
+ enc := expfmt.NewEncoder(writer, contentType)
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ if lastErr != nil && buf.Len() == 0 {
+ http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
+ return
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+ })
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part = strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
+
+var instLabels = []string{"method", "code"}
+
+type nower interface {
+ Now() time.Time
+}
+
+type nowFunc func() time.Time
+
+func (n nowFunc) Now() time.Time {
+ return n()
+}
+
+var now nower = nowFunc(func() time.Time {
+ return time.Now()
+})
+
+// InstrumentHandler wraps the given HTTP handler for instrumentation. It
+// registers four metric collectors (if not already done) and reports HTTP
+// metrics to the (newly or already) registered collectors: http_requests_total
+// (CounterVec), http_request_duration_microseconds (Summary),
+// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
+// has a constant label named "handler" with the provided handlerName as
+// value. http_requests_total is a metric vector partitioned by HTTP method
+// (label name "method") and HTTP status code (label name "code").
+//
+// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
+// package promhttp instead. The issues are the following: (1) It uses Summaries
+// rather than Histograms. Summaries are not useful if aggregation across
+// multiple instances is required. (2) It uses microseconds as unit, which is
+// deprecated and should be replaced by seconds. (3) The size of the request is
+// calculated in a separate goroutine. Since this calculator requires access to
+// the request header, it creates a race with any writes to the header performed
+// during request handling. httputil.ReverseProxy is a prominent example of a
+// handler performing such writes. (4) It has additional issues with HTTP/2, cf.
+// https://github.com/prometheus/client_golang/issues/272.
+func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFunc wraps the given function for instrumentation. It
+// otherwise works in the same way as InstrumentHandler (and shares the same
+// issues).
+//
+// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
+func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(
+ SummaryOpts{
+ Subsystem: "http",
+ ConstLabels: Labels{"handler": handlerName},
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ handlerFunc,
+ )
+}
+
+// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
+// issues) but provides more flexibility (at the cost of a more complex call
+// syntax). As InstrumentHandler, this function registers four metric
+// collectors, but it uses the provided SummaryOpts to create them. However, the
+// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
+// by "requests_total", "request_duration_microseconds", "request_size_bytes",
+// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
+// help string. The names of the variable labels of the http_requests_total
+// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
+//
+// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
+// behavior of InstrumentHandler:
+//
+// prometheus.InstrumentHandlerWithOpts(
+// prometheus.SummaryOpts{
+// Subsystem: "http",
+// ConstLabels: prometheus.Labels{"handler": handlerName},
+// },
+// handler,
+// )
+//
+// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
+// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
+// and all its fields are set to the equally named fields in the provided
+// SummaryOpts.
+//
+// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
+func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
+// the same issues) but provides more flexibility (at the cost of a more complex
+// call syntax). See InstrumentHandlerWithOpts for details how the provided
+// SummaryOpts are used.
+//
+// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
+// as InstrumentHandler is. Use the tooling provided in package promhttp instead.
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ reqCnt := NewCounterVec(
+ CounterOpts{
+ Namespace: opts.Namespace,
+ Subsystem: opts.Subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: opts.ConstLabels,
+ },
+ instLabels,
+ )
+ if err := Register(reqCnt); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqCnt = are.ExistingCollector.(*CounterVec)
+ } else {
+ panic(err)
+ }
+ }
+
+ opts.Name = "request_duration_microseconds"
+ opts.Help = "The HTTP request latencies in microseconds."
+ reqDur := NewSummary(opts)
+ if err := Register(reqDur); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqDur = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
+
+ opts.Name = "request_size_bytes"
+ opts.Help = "The HTTP request sizes in bytes."
+ reqSz := NewSummary(opts)
+ if err := Register(reqSz); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqSz = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
+
+ opts.Name = "response_size_bytes"
+ opts.Help = "The HTTP response sizes in bytes."
+ resSz := NewSummary(opts)
+ if err := Register(resSz); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ resSz = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+
+ delegate := &responseWriterDelegator{ResponseWriter: w}
+ out := computeApproximateRequestSize(r)
+
+ _, cn := w.(http.CloseNotifier)
+ _, fl := w.(http.Flusher)
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ var rw http.ResponseWriter
+ if cn && fl && hj && rf {
+ rw = &fancyResponseWriterDelegator{delegate}
+ } else {
+ rw = delegate
+ }
+ handlerFunc(rw, r)
+
+ elapsed := float64(time.Since(now)) / float64(time.Microsecond)
+
+ method := sanitizeMethod(r.Method)
+ code := sanitizeCode(delegate.status)
+ reqCnt.WithLabelValues(method, code).Inc()
+ reqDur.Observe(elapsed)
+ resSz.Observe(float64(delegate.written))
+ reqSz.Observe(float64(<-out))
+ })
+}
+
+func computeApproximateRequestSize(r *http.Request) <-chan int {
+ // Get URL length in current goroutine for avoiding a race condition.
+ // HandlerFunc that runs in parallel may modify the URL.
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
+ }
+
+ out := make(chan int, 1)
+
+ go func() {
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ out <- s
+ close(out)
+ }()
+
+ return out
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ status int
+ written int64
+ wroteHeader bool
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type fancyResponseWriterDelegator struct {
+ *responseWriterDelegator
+}
+
+func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+ return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (f *fancyResponseWriterDelegator) Flush() {
+ f.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return f.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
+ if !f.wroteHeader {
+ f.WriteHeader(http.StatusOK)
+ }
+ n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
+ f.written += n
+ return n, err
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
new file mode 100644
index 00000000000..351c26e1aed
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
@@ -0,0 +1,85 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "sort"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+		// inconsistent. However, we have to deal with it, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+
+ // We should never arrive here. Multiple metrics with the same
+ // label set in the same scrape will lead to undefined ingestion
+ // behavior. However, as above, we have to provide stable sorting
+ // here, even for inconsistent metrics. So sort equal metrics
+ // by their timestamp, with missing timestamps (implying "now")
+ // coming last.
+ if s[i].TimestampMs == nil {
+ return false
+ }
+ if s[j].TimestampMs == nil {
+ return true
+ }
+ return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// NormalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name, mf := range metricFamiliesByName {
+ if len(mf.Metric) > 0 {
+ names = append(names, name)
+ }
+ }
+ sort.Strings(names)
+ result := make([]*dto.MetricFamily, 0, len(names))
+ for _, name := range names {
+ result = append(result, metricFamiliesByName[name])
+ }
+ return result
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/labels.go
new file mode 100644
index 00000000000..e68f132ecef
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -0,0 +1,70 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/prometheus/common/model"
+)
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
+ if len(labels) != expectedNumberOfValues {
+ return errInconsistentCardinality
+ }
+
+ for name, val := range labels {
+ if !utf8.ValidString(val) {
+ return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
+ }
+ }
+
+ return nil
+}
+
+func validateLabelValues(vals []string, expectedNumberOfValues int) error {
+ if len(vals) != expectedNumberOfValues {
+ return errInconsistentCardinality
+ }
+
+ for _, val := range vals {
+ if !utf8.ValidString(val) {
+ return fmt.Errorf("label value %q is not valid UTF-8", val)
+ }
+ }
+
+ return nil
+}
+
+func checkLabelName(l string) bool {
+ return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 00000000000..55e6d86d596
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,174 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its metadata being exported to
+// Prometheus. Implementations of Metric in this package are Gauge, Counter,
+// Histogram, Summary, and Untyped.
+type Metric interface {
+ // Desc returns the descriptor for the Metric. This method idempotently
+ // returns the same descriptor throughout the lifetime of the
+ // Metric. The returned descriptor is immutable by contract. A Metric
+ // unable to describe itself must return an invalid descriptor (created
+ // with NewInvalidDesc).
+ Desc() *Desc
+ // Write encodes the Metric into a "Metric" Protocol Buffer data
+ // transmission object.
+ //
+ // Metric implementations must observe concurrency safety as reads of
+ // this metric may occur at any time, and any blocking occurs at the
+ // expense of total performance of rendering all registered
+ // metrics. Ideally, Metric implementations should support concurrent
+ // readers.
+ //
+ // While populating dto.Metric, it is the responsibility of the
+ // implementation to ensure validity of the Metric protobuf (like valid
+ // UTF-8 strings or syntactically valid metric and label names). It is
+ // recommended to sort labels lexicographically. Callers of Write should
+ // still make sure of sorting if they depend on it.
+ Write(*dto.Metric) error
+ // TODO(beorn7): The original rationale of passing in a pre-allocated
+ // dto.Metric protobuf to save allocations has disappeared. The
+ // signature of this method should be changed to "Write() (*dto.Metric,
+ // error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name to a non-empty string. All other fields are
+// optional and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type Opts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Metric (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the metric must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this metric.
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+ ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
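+//
+// For example, BuildFQName("ecs", "agent", "tasks") returns "ecs_agent_tasks",
+// while BuildFQName("", "", "up") returns just "up".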
+func BuildFQName(namespace, subsystem, name string) string {
+ if name == "" {
+ return ""
+ }
+ switch {
+ case namespace != "" && subsystem != "":
+ return strings.Join([]string{namespace, subsystem, name}, "_")
+ case namespace != "":
+ return strings.Join([]string{namespace, name}, "_")
+ case subsystem != "":
+ return strings.Join([]string{subsystem, name}, "_")
+ }
+ return name
+}
+
+// labelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type labelPairSorter []*dto.LabelPair
+
+func (s labelPairSorter) Len() int {
+ return len(s)
+}
+
+func (s labelPairSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s labelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+type invalidMetric struct {
+ desc *Desc
+ err error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+ return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
+
+type timestampedMetric struct {
+ Metric
+ t time.Time
+}
+
+func (m timestampedMetric) Write(pb *dto.Metric) error {
+ e := m.Metric.Write(pb)
+ pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
+ return e
+}
+
+// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
+// way that it has an explicit timestamp set to the provided Time. This is only
+// useful in rare cases as the timestamp of a Prometheus metric should usually
+// be set by the Prometheus server during scraping. Exceptions include mirroring
+// metrics with given timestamps from other metric
+// sources.
+//
+// NewMetricWithTimestamp works best with MustNewConstMetric,
+// MustNewConstHistogram, and MustNewConstSummary, see example.
+//
+// Currently, the exposition formats used by Prometheus are limited to
+// millisecond resolution. Thus, the provided time will be rounded down to the
+// next full millisecond value.
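+//
+// A brief sketch (desc, t, and the value are illustrative assumptions):
+//
+//	m := NewMetricWithTimestamp(t, MustNewConstMetric(desc, GaugeValue, 1))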
+func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
+ return timestampedMetric{Metric: m, t: t}
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/observer.go
new file mode 100644
index 00000000000..5806cd09e30
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/observer.go
@@ -0,0 +1,52 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Observer is the interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+ Observe(float64)
+}
+
+// The ObserverFunc type is an adapter to allow the use of ordinary
+// functions as Observers. If f is a function with the appropriate
+// signature, ObserverFunc(f) is an Observer that calls f.
+//
+// This adapter is usually used in connection with the Timer type, and there are
+// two general use cases:
+//
+// The most common one is to use a Gauge as the Observer for a Timer.
+// See the "Gauge" Timer example.
+//
+// The more advanced use case is to create a function that dynamically decides
+// which Observer to use for observing the duration. See the "Complex" Timer
+// example.
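+//
+// A sketch of the Gauge case (assuming this package's NewTimer and an
+// existing Gauge g):
+//
+//	timer := NewTimer(ObserverFunc(g.Set))
+//	defer timer.ObserveDuration()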
+type ObserverFunc func(float64)
+
+// Observe calls f(value). It implements Observer.
+func (f ObserverFunc) Observe(value float64) {
+ f(value)
+}
+
+// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
+type ObserverVec interface {
+ GetMetricWith(Labels) (Observer, error)
+ GetMetricWithLabelValues(lvs ...string) (Observer, error)
+ With(Labels) Observer
+ WithLabelValues(...string) Observer
+ CurryWith(Labels) (ObserverVec, error)
+ MustCurryWith(Labels) ObserverVec
+
+ Collector
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 00000000000..55176d58ce6
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,204 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "os"
+
+ "github.com/prometheus/procfs"
+)
+
+type processCollector struct {
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ reportErrors bool
+ cpuTotal *Desc
+ openFDs, maxFDs *Desc
+ vsize, maxVsize *Desc
+ rss *Desc
+ startTime *Desc
+}
+
+// ProcessCollectorOpts defines the behavior of a process metrics collector
+// created with NewProcessCollector.
+type ProcessCollectorOpts struct {
+ // PidFn returns the PID of the process the collector collects metrics
+ // for. It is called upon each collection. By default, the PID of the
+ // current process is used, as determined on construction time by
+ // calling os.Getpid().
+ PidFn func() (int, error)
+ // If non-empty, each of the collected metrics is prefixed by the
+ // provided string and an underscore ("_").
+ Namespace string
+ // If true, any error encountered during collection is reported as an
+ // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
+ // and the collected metrics will be incomplete. (Possibly, no metrics
+ // will be collected at all.) While that's usually not desired, it is
+ // appropriate for the common "mix-in" of process metrics, where process
+ // metrics are nice to have, but failing to collect them should not
+ // disrupt the collection of the remaining metrics.
+ ReportErrors bool
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including CPU, memory and file descriptor usage as well as
+// the process start time. The detailed behavior is defined by the provided
+// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
+// collector for the current process with an empty namespace string and no error
+// reporting.
+//
+// Currently, the collector depends on a Linux-style proc filesystem and
+// therefore only exports metrics for Linux.
+//
+// Note: An older version of this function had the following signature:
+//
+// NewProcessCollector(pid int, namespace string) Collector
+//
+// Most commonly, it was called as
+//
+// NewProcessCollector(os.Getpid(), "")
+//
+// The following call of the current version is equivalent to the above:
+//
+// NewProcessCollector(ProcessCollectorOpts{})
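+//
+// Typically, the returned collector is registered right away, e.g.:
+//
+//	MustRegister(NewProcessCollector(ProcessCollectorOpts{}))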
+func NewProcessCollector(opts ProcessCollectorOpts) Collector {
+ ns := ""
+ if len(opts.Namespace) > 0 {
+ ns = opts.Namespace + "_"
+ }
+
+ c := &processCollector{
+ reportErrors: opts.ReportErrors,
+ cpuTotal: NewDesc(
+ ns+"process_cpu_seconds_total",
+ "Total user and system CPU time spent in seconds.",
+ nil, nil,
+ ),
+ openFDs: NewDesc(
+ ns+"process_open_fds",
+ "Number of open file descriptors.",
+ nil, nil,
+ ),
+ maxFDs: NewDesc(
+ ns+"process_max_fds",
+ "Maximum number of open file descriptors.",
+ nil, nil,
+ ),
+ vsize: NewDesc(
+ ns+"process_virtual_memory_bytes",
+ "Virtual memory size in bytes.",
+ nil, nil,
+ ),
+ maxVsize: NewDesc(
+ ns+"process_virtual_memory_max_bytes",
+ "Maximum amount of virtual memory available in bytes.",
+ nil, nil,
+ ),
+ rss: NewDesc(
+ ns+"process_resident_memory_bytes",
+ "Resident memory size in bytes.",
+ nil, nil,
+ ),
+ startTime: NewDesc(
+ ns+"process_start_time_seconds",
+ "Start time of the process since unix epoch in seconds.",
+ nil, nil,
+ ),
+ }
+
+ if opts.PidFn == nil {
+ pid := os.Getpid()
+ c.pidFn = func() (int, error) { return pid, nil }
+ } else {
+ c.pidFn = opts.PidFn
+ }
+
+ // Set up process metric collection if supported by the runtime.
+ if _, err := procfs.NewStat(); err == nil {
+ c.collectFn = c.processCollect
+ } else {
+ c.collectFn = func(ch chan<- Metric) {
+ c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+ }
+ }
+
+ return c
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.vsize
+ ch <- c.maxVsize
+ ch <- c.rss
+ ch <- c.startTime
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+ c.collectFn(ch)
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+
+ if stat, err := p.NewStat(); err == nil {
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
+ if startTime, err := stat.StartTime(); err == nil {
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+ } else {
+ c.reportError(ch, c.startTime, err)
+ }
+ } else {
+ c.reportError(ch, nil, err)
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
+ } else {
+ c.reportError(ch, c.openFDs, err)
+ }
+
+ if limits, err := p.NewLimits(); err == nil {
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+ ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
+ } else {
+ c.reportError(ch, nil, err)
+ }
+}
+
+func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
+ if !c.reportErrors {
+ return
+ }
+ if desc == nil {
+ desc = NewInvalidDesc(err)
+ }
+ ch <- NewInvalidMetric(desc, err)
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
new file mode 100644
index 00000000000..67b56d37cfd
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -0,0 +1,199 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+const (
+ closeNotifier = 1 << iota
+ flusher
+ hijacker
+ readerFrom
+ pusher
+)
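+
+// Editor's note: each constant flags one optional interface of the wrapped
+// http.ResponseWriter. For example, a writer that also implements
+// http.Flusher (2) and http.Hijacker (4) yields index 6 into pickDelegator.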
+
+type delegator interface {
+ http.ResponseWriter
+
+ Status() int
+ Written() int64
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+ observeWriteHeader func(int)
+}
+
+func (r *responseWriterDelegator) Status() int {
+ return r.status
+}
+
+func (r *responseWriterDelegator) Written() int64 {
+ return r.written
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+ if r.observeWriteHeader != nil {
+ r.observeWriteHeader(code)
+ }
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type closeNotifierDelegator struct{ *responseWriterDelegator }
+type flusherDelegator struct{ *responseWriterDelegator }
+type hijackerDelegator struct{ *responseWriterDelegator }
+type readerFromDelegator struct{ *responseWriterDelegator }
+
+func (d closeNotifierDelegator) CloseNotify() <-chan bool {
+ return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+func (d flusherDelegator) Flush() {
+ d.ResponseWriter.(http.Flusher).Flush()
+}
+func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return d.ResponseWriter.(http.Hijacker).Hijack()
+}
+func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
+ if !d.wroteHeader {
+ d.WriteHeader(http.StatusOK)
+ }
+ n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
+ d.written += n
+ return n, err
+}
+
+var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
+
+func init() {
+ // TODO(beorn7): Code generation would help here.
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
+ return d
+ }
+ pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
+ return closeNotifierDelegator{d}
+ }
+ pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
+ return flusherDelegator{d}
+ }
+ pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
+ return struct {
+ *responseWriterDelegator
+ http.Flusher
+ http.CloseNotifier
+ }{d, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
+ return hijackerDelegator{d}
+ }
+ pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.CloseNotifier
+ }{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.Flusher
+ }{d, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
+ return readerFromDelegator{d}
+ }
+ pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Flusher
+ }{d, readerFromDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Flusher
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
new file mode 100644
index 00000000000..31a70695695
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
@@ -0,0 +1,181 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+ "io"
+ "net/http"
+)
+
+type pusherDelegator struct{ *responseWriterDelegator }
+
+func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
+ return d.ResponseWriter.(http.Pusher).Push(target, opts)
+}
+
+func init() {
+ pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
+ return pusherDelegator{d}
+ }
+ pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Flusher
+ }{d, pusherDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ }{d, pusherDelegator{d}, hijackerDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.Flusher
+ }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ }{d, pusherDelegator{d}, readerFromDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Flusher
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+}
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+ d := &responseWriterDelegator{
+ ResponseWriter: w,
+ observeWriteHeader: observeWriteHeaderFunc,
+ }
+
+ id := 0
+ if _, ok := w.(http.CloseNotifier); ok {
+ id += closeNotifier
+ }
+ if _, ok := w.(http.Flusher); ok {
+ id += flusher
+ }
+ if _, ok := w.(http.Hijacker); ok {
+ id += hijacker
+ }
+ if _, ok := w.(io.ReaderFrom); ok {
+ id += readerFrom
+ }
+ if _, ok := w.(http.Pusher); ok {
+ id += pusher
+ }
+
+ return pickDelegator[id](d)
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
new file mode 100644
index 00000000000..8bb9b8b68f8
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
@@ -0,0 +1,44 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.8
+
+package promhttp
+
+import (
+ "io"
+ "net/http"
+)
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+ d := &responseWriterDelegator{
+ ResponseWriter: w,
+ observeWriteHeader: observeWriteHeaderFunc,
+ }
+
+ id := 0
+ if _, ok := w.(http.CloseNotifier); ok {
+ id += closeNotifier
+ }
+ if _, ok := w.(http.Flusher); ok {
+ id += flusher
+ }
+ if _, ok := w.(http.Hijacker); ok {
+ id += hijacker
+ }
+ if _, ok := w.(io.ReaderFrom); ok {
+ id += readerFrom
+ }
+
+ return pickDelegator[id](d)
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
new file mode 100644
index 00000000000..01357374feb
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -0,0 +1,311 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package promhttp provides tooling around HTTP servers and clients.
+//
+// First, the package allows the creation of http.Handler instances to expose
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or that
+// log errors.
+//
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
+//
+// Finally, the package allows for an http.RoundTripper to be instrumented via
+// middleware. Middleware wrappers follow the naming scheme
+// InstrumentRoundTripperX, where X describes the intended use of the
+// middleware. See each function's doc comment for specific details.
+package promhttp
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/prometheus/common/expfmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+)
+
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+ buf := bufPool.Get()
+ if buf == nil {
+ return &bytes.Buffer{}
+ }
+ return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ bufPool.Put(buf)
+}
+
+// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
+// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
+// no error logging, and it applies compression if requested by the client.
+//
+// The returned http.Handler is already instrumented using the
+// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
+// create multiple http.Handlers by separate calls of the Handler function, the
+// metrics used for instrumentation will be shared between them, providing
+// global scrape counts.
+//
+// This function is meant to cover the bulk of basic use cases. If you are doing
+// anything that requires more customization (including using a non-default
+// Gatherer, different instrumentation, and non-default HandlerOpts), use the
+// HandlerFor function. See there for details.
+func Handler() http.Handler {
+ return InstrumentMetricHandler(
+ prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
+ )
+}
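+
+// A minimal usage sketch (editor's note, not upstream code): exposing the
+// default registry on :8080/metrics with net/http:
+//
+//    http.Handle("/metrics", promhttp.Handler())
+//    log.Fatal(http.ListenAndServe(":8080", nil))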
+
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as is used by the Handler function.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+ var inFlightSem chan struct{}
+ if opts.MaxRequestsInFlight > 0 {
+ inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
+ }
+
+ h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ if inFlightSem != nil {
+ select {
+ case inFlightSem <- struct{}{}: // All good, carry on.
+ defer func() { <-inFlightSem }()
+ default:
+ http.Error(w, fmt.Sprintf(
+ "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
+ ), http.StatusServiceUnavailable)
+ return
+ }
+ }
+
+ mfs, err := reg.Gather()
+ if err != nil {
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error gathering metrics:", err)
+ }
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ if len(mfs) == 0 {
+ http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ case HTTPErrorOnError:
+ http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ buf := getBuf()
+ defer giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
+ enc := expfmt.NewEncoder(writer, contentType)
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error encoding metric family:", err)
+ }
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ // Handled later.
+ case HTTPErrorOnError:
+ http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ if lastErr != nil && buf.Len() == 0 {
+ http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
+ return
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ if _, err := w.Write(buf.Bytes()); err != nil && opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error while sending encoded metrics:", err)
+ }
+ // TODO(beorn7): Consider streaming serving of metrics.
+ })
+
+ if opts.Timeout <= 0 {
+ return h
+ }
+ return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
+ "Exceeded configured timeout of %v.\n",
+ opts.Timeout,
+ ))
+}
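+
+// A minimal usage sketch (editor's note, not upstream code): serving a custom
+// registry with error logging and compression disabled; reg is assumed to be
+// a *prometheus.Registry created elsewhere:
+//
+//    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
+//        ErrorLog:           log.New(os.Stderr, "promhttp: ", log.LstdFlags),
+//        ErrorHandling:      promhttp.ContinueOnError,
+//        DisableCompression: true,
+//    }))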
+
+// InstrumentMetricHandler is usually used with an http.Handler returned by the
+// HandlerFor function. It instruments the provided http.Handler with two
+// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
+// scrapes partitioned by HTTP status code, and a gauge
+// "promhttp_metric_handler_requests_in_flight" to track the number of
+// simultaneous scrapes. This function idempotently registers collectors for
+// both metrics with the provided Registerer. It panics if the registration
+// fails. The provided metrics are useful to see how many scrapes hit the
+// monitored target (which could be from different Prometheus servers or other
+// scrapers), and how often they overlap (which would result in more than one
+// scrape in flight at the same time). Note that the scrapes-in-flight gauge
+// will contain the scrape by which it is exposed, while the scrape counter will
+// only get incremented after the scrape is complete (as only then the status
+// code is known). For tracking scrape durations, use the
+// "scrape_duration_seconds" gauge created by the Prometheus server upon each
+// scrape.
+func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
+ cnt := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "promhttp_metric_handler_requests_total",
+ Help: "Total number of scrapes by HTTP status code.",
+ },
+ []string{"code"},
+ )
+ // Initialize the most likely HTTP status codes.
+ cnt.WithLabelValues("200")
+ cnt.WithLabelValues("500")
+ cnt.WithLabelValues("503")
+ if err := reg.Register(cnt); err != nil {
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ cnt = are.ExistingCollector.(*prometheus.CounterVec)
+ } else {
+ panic(err)
+ }
+ }
+
+ gge := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "promhttp_metric_handler_requests_in_flight",
+ Help: "Current number of scrapes being served.",
+ })
+ if err := reg.Register(gge); err != nil {
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ gge = are.ExistingCollector.(prometheus.Gauge)
+ } else {
+ panic(err)
+ }
+ }
+
+ return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
+}
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+ // Serve an HTTP status code 500 upon the first error
+ // encountered. Report the error message in the body.
+ HTTPErrorOnError HandlerErrorHandling = iota
+ // Ignore errors and try to serve as many metrics as possible. However,
+ // if no metrics can be served, serve an HTTP status code 500 and the
+ // last error message in the body. Only use this in deliberate "best
+ // effort" metrics collection scenarios. It is recommended to at least
+ // log errors (by providing an ErrorLog in HandlerOpts) to not mask
+ // errors completely.
+ ContinueOnError
+ // Panic upon the first error encountered (useful for "crash only" apps).
+ PanicOnError
+)
+
+// Logger is the minimal interface HandlerOpts needs for logging. Note that
+// log.Logger from the standard library implements this interface, and it is
+// easy for custom loggers to implement, if they don't do so already.
+type Logger interface {
+ Println(v ...interface{})
+}
+
+// HandlerOpts specifies options for how to serve metrics via an http.Handler. The
+// zero value of HandlerOpts is a reasonable default.
+type HandlerOpts struct {
+ // ErrorLog specifies an optional logger for errors collecting and
+ // serving metrics. If nil, errors are not logged at all.
+ ErrorLog Logger
+ // ErrorHandling defines how errors are handled. Note that errors are
+ // logged regardless of the configured ErrorHandling provided ErrorLog
+ // is not nil.
+ ErrorHandling HandlerErrorHandling
+ // If DisableCompression is true, the handler will never compress the
+ // response, even if requested by the client.
+ DisableCompression bool
+ // The number of concurrent HTTP requests is limited to
+ // MaxRequestsInFlight. Additional requests are responded to with 503
+ // Service Unavailable and a suitable message in the body. If
+ // MaxRequestsInFlight is 0 or negative, no limit is applied.
+ MaxRequestsInFlight int
+ // If handling a request takes longer than Timeout, it is responded to
+	// with 503 Service Unavailable and a suitable message. No timeout is
+ // applied if Timeout is 0 or negative. Note that with the current
+ // implementation, reaching the timeout simply ends the HTTP requests as
+ // described above (and even that only if sending of the body hasn't
+ // started yet), while the bulk work of gathering all the metrics keeps
+ // running in the background (with the eventual result to be thrown
+ // away). Until the implementation is improved, it is recommended to
+ // implement a separate timeout in potentially slow Collectors.
+ Timeout time.Duration
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
+ if compressionDisabled {
+ return writer, ""
+ }
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part = strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
new file mode 100644
index 00000000000..86fd564470f
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -0,0 +1,97 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+ return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See the example for InstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ gauge.Inc()
+ defer gauge.Dec()
+ return next.RoundTrip(r)
+ })
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
+// and/or HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// See the example for InstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+ code, method := checkLabels(counter)
+
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ resp, err := next.RoundTrip(r)
+ if err == nil {
+ counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+ }
+ return resp, err
+ })
+}
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided
+// ObserverVec. The ObserverVec must have zero, one, or two non-const
+// non-curried labels. For those, the only allowed label names are "code" and
+// "method". The function panics otherwise. The Observe method of the Observer
+// in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+ code, method := checkLabels(obs)
+
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ start := time.Now()
+ resp, err := next.RoundTrip(r)
+ if err == nil {
+ obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+ }
+ return resp, err
+ })
+}
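+
+// A minimal usage sketch (editor's note, not upstream code): chaining the
+// client middleware around http.DefaultTransport; the metric names are
+// illustrative only:
+//
+//    inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
+//        Name: "client_in_flight_requests",
+//        Help: "In-flight HTTP client requests.",
+//    })
+//    counter := prometheus.NewCounterVec(
+//        prometheus.CounterOpts{
+//            Name: "client_requests_total",
+//            Help: "HTTP client requests by code and method.",
+//        },
+//        []string{"code", "method"},
+//    )
+//    prometheus.MustRegister(inFlight, counter)
+//    client := &http.Client{
+//        Transport: InstrumentRoundTripperInFlight(inFlight,
+//            InstrumentRoundTripperCounter(counter, http.DefaultTransport)),
+//    }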
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
new file mode 100644
index 00000000000..a034d1ec0f1
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
@@ -0,0 +1,144 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+ "crypto/tls"
+ "net/http"
+ "net/http/httptrace"
+ "time"
+)
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use separately bucketed Histograms, or to implement custom
+// instance labels on a per-function basis.
+type InstrumentTrace struct {
+ GotConn func(float64)
+ PutIdleConn func(float64)
+ GotFirstResponseByte func(float64)
+ Got100Continue func(float64)
+ DNSStart func(float64)
+ DNSDone func(float64)
+ ConnectStart func(float64)
+ ConnectDone func(float64)
+ TLSHandshakeStart func(float64)
+ TLSHandshakeDone func(float64)
+ WroteHeaders func(float64)
+ Wait100Continue func(float64)
+ WroteRequest func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions are not using a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for InstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ start := time.Now()
+
+ trace := &httptrace.ClientTrace{
+ GotConn: func(_ httptrace.GotConnInfo) {
+ if it.GotConn != nil {
+ it.GotConn(time.Since(start).Seconds())
+ }
+ },
+ PutIdleConn: func(err error) {
+ if err != nil {
+ return
+ }
+ if it.PutIdleConn != nil {
+ it.PutIdleConn(time.Since(start).Seconds())
+ }
+ },
+ DNSStart: func(_ httptrace.DNSStartInfo) {
+ if it.DNSStart != nil {
+ it.DNSStart(time.Since(start).Seconds())
+ }
+ },
+ DNSDone: func(_ httptrace.DNSDoneInfo) {
+ if it.DNSDone != nil {
+ it.DNSDone(time.Since(start).Seconds())
+ }
+ },
+ ConnectStart: func(_, _ string) {
+ if it.ConnectStart != nil {
+ it.ConnectStart(time.Since(start).Seconds())
+ }
+ },
+ ConnectDone: func(_, _ string, err error) {
+ if err != nil {
+ return
+ }
+ if it.ConnectDone != nil {
+ it.ConnectDone(time.Since(start).Seconds())
+ }
+ },
+ GotFirstResponseByte: func() {
+ if it.GotFirstResponseByte != nil {
+ it.GotFirstResponseByte(time.Since(start).Seconds())
+ }
+ },
+ Got100Continue: func() {
+ if it.Got100Continue != nil {
+ it.Got100Continue(time.Since(start).Seconds())
+ }
+ },
+ TLSHandshakeStart: func() {
+ if it.TLSHandshakeStart != nil {
+ it.TLSHandshakeStart(time.Since(start).Seconds())
+ }
+ },
+ TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+ if err != nil {
+ return
+ }
+ if it.TLSHandshakeDone != nil {
+ it.TLSHandshakeDone(time.Since(start).Seconds())
+ }
+ },
+ WroteHeaders: func() {
+ if it.WroteHeaders != nil {
+ it.WroteHeaders(time.Since(start).Seconds())
+ }
+ },
+ Wait100Continue: func() {
+ if it.Wait100Continue != nil {
+ it.Wait100Continue(time.Since(start).Seconds())
+ }
+ },
+ WroteRequest: func(_ httptrace.WroteRequestInfo) {
+ if it.WroteRequest != nil {
+ it.WroteRequest(time.Since(start).Seconds())
+ }
+ },
+ }
+	r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
+
+ return next.RoundTrip(r)
+ })
+}
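+
+// A minimal usage sketch (editor's note, not upstream code): observing only
+// DNS lookup start times into a plain histogram; the metric name is
+// illustrative only:
+//
+//    dnsStart := prometheus.NewHistogram(prometheus.HistogramOpts{
+//        Name: "client_dns_start_seconds",
+//        Help: "Time from request start to DNS lookup start.",
+//    })
+//    prometheus.MustRegister(dnsStart)
+//    client := &http.Client{
+//        Transport: InstrumentRoundTripperTrace(
+//            &InstrumentTrace{DNSStart: dnsStart.Observe},
+//            http.DefaultTransport,
+//        ),
+//    }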
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
new file mode 100644
index 00000000000..9db24380533
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -0,0 +1,447 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "errors"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// magicString is used for the hacky label test in checkLabels. Remove once fixed.
+const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
+
+// InstrumentHandlerInFlight is a middleware that wraps the provided
+// http.Handler. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.Handler.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ g.Inc()
+ defer g.Dec()
+ next.ServeHTTP(w, r)
+ })
+}
+
+// InstrumentHandlerDuration is a middleware that wraps the provided
+// http.Handler to observe the request duration with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request duration in seconds. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ next.ServeHTTP(w, r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
+ })
+}
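+
+// A minimal usage sketch (editor's note, not upstream code): wrapping a
+// handler with a duration histogram partitioned by method; apiHandler is a
+// hypothetical http.Handler:
+//
+//    duration := prometheus.NewHistogramVec(
+//        prometheus.HistogramOpts{
+//            Name:    "api_request_duration_seconds",
+//            Help:    "Request duration in seconds.",
+//            Buckets: prometheus.DefBuckets,
+//        },
+//        []string{"method"},
+//    )
+//    prometheus.MustRegister(duration)
+//    http.Handle("/api", InstrumentHandlerDuration(duration, apiHandler))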
+
+// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
+// to observe the request result with the provided CounterVec. The CounterVec
+// must have zero, one, or two non-const non-curried labels. For those, the only
+// allowed label names are "code" and "method". The function panics
+// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
+// HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, the Counter is not incremented.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(counter)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ counter.With(labels(code, method, r.Method, d.Status())).Inc()
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ counter.With(labels(code, method, r.Method, 0)).Inc()
+ })
+}
+
+// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
+// http.Handler to observe with the provided ObserverVec the request duration
+// until the response headers are written. The ObserverVec must have zero, one,
+// or two non-const non-curried labels. For those, the only allowed label names
+// are "code" and "method". The function panics otherwise. The Observe method of
+// the Observer in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped Handler panics before calling WriteHeader, no value is
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ d := newDelegator(w, func(status int) {
+ obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
+ })
+ next.ServeHTTP(d, r)
+ })
+}
+
+// InstrumentHandlerRequestSize is a middleware that wraps the provided
+// http.Handler to observe the request size with the provided ObserverVec. The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
+ })
+}
+
+// InstrumentHandlerResponseSize is a middleware that wraps the provided
+// http.Handler to observe the response size with the provided ObserverVec. The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the response size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
+ code, method := checkLabels(obs)
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
+ })
+}
+
+func checkLabels(c prometheus.Collector) (code bool, method bool) {
+ // TODO(beorn7): Remove this hacky way to check for instance labels
+ // once Descriptors can have their dimensionality queried.
+ var (
+ desc *prometheus.Desc
+ m prometheus.Metric
+ pm dto.Metric
+ lvs []string
+ )
+
+ // Get the Desc from the Collector.
+ descc := make(chan *prometheus.Desc, 1)
+ c.Describe(descc)
+
+ select {
+ case desc = <-descc:
+ default:
+ panic("no description provided by collector")
+ }
+ select {
+ case <-descc:
+ panic("more than one description provided by collector")
+ default:
+ }
+
+ close(descc)
+
+ // Create a ConstMetric with the Desc. Since we don't know how many
+	// variable labels there are, retry with more label values until it works.
+ for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
+ m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
+ }
+
+ // Write out the metric into a proto message and look at the labels.
+ // If the value is not the magicString, it is a constLabel, which doesn't interest us.
+ // If the label is curried, it doesn't interest us.
+ // In all other cases, only "code" or "method" is allowed.
+ if err := m.Write(&pm); err != nil {
+ panic("error checking metric for labels")
+ }
+ for _, label := range pm.Label {
+ name, value := label.GetName(), label.GetValue()
+ if value != magicString || isLabelCurried(c, name) {
+ continue
+ }
+ switch name {
+ case "code":
+ code = true
+ case "method":
+ method = true
+ default:
+ panic("metric partitioned with non-supported labels")
+ }
+ }
+ return
+}
+
+func isLabelCurried(c prometheus.Collector, label string) bool {
+ // This is even hackier than the label test above.
+ // We essentially try to curry again and see if it works.
+ // But for that, we need to type-convert to the two
+ // types we use here, ObserverVec or *CounterVec.
+ switch v := c.(type) {
+ case *prometheus.CounterVec:
+ if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+ return false
+ }
+ case prometheus.ObserverVec:
+ if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+ return false
+ }
+ default:
+ panic("unsupported metric vec type")
+ }
+ return true
+}
+
+// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
+// unnecessary allocations on each request.
+var emptyLabels = prometheus.Labels{}
+
+func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+ if !(code || method) {
+ return emptyLabels
+ }
+ labels := prometheus.Labels{}
+
+ if code {
+ labels["code"] = sanitizeCode(status)
+ }
+ if method {
+ labels["method"] = sanitizeMethod(reqMethod)
+ }
+
+ return labels
+}
+
+func computeApproximateRequestSize(r *http.Request) int {
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
+ }
+
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ return s
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
+// the stdlib.
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200, 0:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 00000000000..e422ef3834c
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,895 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
+)
+
+const (
+ // Capacity for the channel to collect metrics and descriptors.
+ capMetricChan = 1000
+ capDescChan = 10
+)
+
+// DefaultRegisterer and DefaultGatherer are the implementations of the
+// Registerer and Gatherer interface a number of convenience functions in this
+// package act on. Initially, both variables point to the same Registry, which
+// has a process collector (currently on Linux only, see NewProcessCollector)
+// and a Go collector (see NewGoCollector, in particular the note about
+// stop-the-world implication with Go versions older than 1.9) already
+// registered. This approach to keep default instances as global state mirrors
+// the approach of other packages in the Go standard library. Note that there
+// are caveats. Change the variables with caution and only if you understand the
+// consequences. Users who want to avoid global state altogether should not use
+// the convenience functions and act on custom instances instead.
+var (
+ defaultRegistry = NewRegistry()
+ DefaultRegisterer Registerer = defaultRegistry
+ DefaultGatherer Gatherer = defaultRegistry
+)
+
+func init() {
+ MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
+ MustRegister(NewGoCollector())
+}
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+ return &Registry{
+ collectorsByID: map[uint64]Collector{},
+ descIDs: map[uint64]struct{}{},
+ dimHashesByName: map[string]uint64{},
+ }
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry. Unchecked Collectors (those whose
+// Describe method does not yield any descriptors) are excluded from the check.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+ r := NewRegistry()
+ r.pedanticChecksEnabled = true
+ return r
+}
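+
+// A minimal usage sketch (editor's note, not upstream code): using a pedantic
+// registry in a test to surface inconsistent Collector implementations;
+// myCollector and t are hypothetical:
+//
+//    reg := NewPedanticRegistry()
+//    if err := reg.Register(myCollector); err != nil {
+//        t.Fatal(err)
+//    }
+//    if _, err := reg.Gather(); err != nil {
+//        t.Errorf("inconsistent metrics: %v", err)
+//    }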
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as type for registration purposes (rather than the Registry type
+// directly). In that way, they are free to use custom Registerer implementations
+// (e.g. for testing purposes).
+type Registerer interface {
+ // Register registers a new Collector to be included in metrics
+ // collection. It returns an error if the descriptors provided by the
+ // Collector are invalid or if they — in combination with descriptors of
+ // already registered Collectors — do not fulfill the consistency and
+ // uniqueness criteria described in the documentation of metric.Desc.
+ //
+ // If the provided Collector is equal to a Collector already registered
+ // (which includes the case of re-registering the same Collector), the
+ // returned error is an instance of AlreadyRegisteredError, which
+ // contains the previously registered Collector.
+ //
+ // A Collector whose Describe method does not yield any Desc is treated
+ // as unchecked. Registration will always succeed. No check for
+ // re-registering (see previous paragraph) is performed. Thus, the
+ // caller is responsible for not double-registering the same unchecked
+ // Collector, and for providing a Collector that will not cause
+ // inconsistent metrics on collection. (This would lead to scrape
+ // errors.)
+ Register(Collector) error
+ // MustRegister works like Register but registers any number of
+ // Collectors and panics upon the first registration that causes an
+ // error.
+ MustRegister(...Collector)
+ // Unregister unregisters the Collector that equals the Collector passed
+ // in as an argument. (Two Collectors are considered equal if their
+ // Describe method yields the same set of descriptors.) The function
+ // returns whether a Collector was unregistered. Note that an unchecked
+ // Collector cannot be unregistered (as its Describe method does not
+ // yield any descriptor).
+ //
+ // Note that even after unregistering, it will not be possible to
+ // register a new Collector that is inconsistent with the unregistered
+ // Collector, e.g. a Collector collecting metrics with the same name but
+ // a different help string. The rationale here is that the same registry
+ // instance must only collect consistent metrics throughout its
+ // lifetime.
+ Unregister(Collector) bool
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies. The Gatherer interface
+// comes with the same general implication as described for the Registerer
+// interface.
+type Gatherer interface {
+ // Gather calls the Collect method of the registered Collectors and then
+ // gathers the collected metrics into a lexicographically sorted slice
+ // of uniquely named MetricFamily protobufs. Gather ensures that the
+ // returned slice is valid and self-consistent so that it can be used
+ // for valid exposition. As an exception to the strict consistency
+ // requirements described for metric.Desc, Gather will tolerate
+ // different sets of label names for metrics of the same metric family.
+ //
+ // Even if an error occurs, Gather attempts to gather as many metrics as
+ // possible. Hence, if a non-nil error is returned, the returned
+ // MetricFamily slice could be nil (in case of a fatal error that
+ // prevented any meaningful metric collection) or contain a number of
+ // MetricFamily protobufs, some of which might be incomplete, and some
+ // might be missing altogether. The returned error (which might be a
+ // MultiError) explains the details. Note that this is mostly useful for
+ // debugging purposes. If the gathered protobufs are to be used for
+ // exposition in actual monitoring, it is almost always better to not
+ // expose an incomplete result and instead disregard the returned
+ // MetricFamily protobufs in case the returned error is non-nil.
+ Gather() ([]*dto.MetricFamily, error)
+}
+
+// Register registers the provided Collector with the DefaultRegisterer.
+//
+// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
+// details.
+func Register(c Collector) error {
+ return DefaultRegisterer.Register(c)
+}
+
+// MustRegister registers the provided Collectors with the DefaultRegisterer and
+// panics if any error occurs.
+//
+// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
+// there for more details.
+func MustRegister(cs ...Collector) {
+ DefaultRegisterer.MustRegister(cs...)
+}
+
+// Unregister removes the registration of the provided Collector from the
+// DefaultRegisterer.
+//
+// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
+// more details.
+func Unregister(c Collector) bool {
+ return DefaultRegisterer.Unregister(c)
+}
+
+// GathererFunc turns a function into a Gatherer.
+type GathererFunc func() ([]*dto.MetricFamily, error)
+
+// Gather implements Gatherer.
+func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
+ return gf()
+}
+
+// AlreadyRegisteredError is returned by the Register method if the Collector to
+// be registered has already been registered before, or a different Collector
+// that collects the same metrics has been registered before. Registration fails
+// in that case, but you can detect from the kind of error what has
+// happened. The error contains fields for the existing Collector and the
+// (rejected) new Collector that equals the existing one. This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+ ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+ return "duplicate metrics collector registration attempted"
+}
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+ if len(errs) == 0 {
+ return ""
+ }
+ buf := &bytes.Buffer{}
+ fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+ for _, err := range errs {
+ fmt.Fprintf(buf, "\n* %s", err)
+ }
+ return buf.String()
+}
+
+// Append appends the provided error if it is not nil.
+func (errs *MultiError) Append(err error) {
+ if err != nil {
+ *errs = append(*errs, err)
+ }
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs is 1). In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ return errs
+ }
+}
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ uncheckedCollectors []Collector
+ pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ newDescIDs = map[uint64]struct{}{}
+ newDimHashesByName = map[string]uint64{}
+ collectorID uint64 // Just a sum of all desc IDs.
+ duplicateDescErr error
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ r.mtx.Lock()
+ defer func() {
+ // Drain channel in case of premature return to not leak a goroutine.
+ for range descChan {
+ }
+ r.mtx.Unlock()
+ }()
+ // Conduct various tests...
+ for desc := range descChan {
+
+ // Is the descriptor valid at all?
+ if desc.err != nil {
+ return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ }
+
+ // Is the descID unique?
+ // (In other words: Is the fqName + constLabel combination unique?)
+ if _, exists := r.descIDs[desc.id]; exists {
+ duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+ }
+ // If it is not a duplicate desc in this collector, add it to
+ // the collectorID. (We allow duplicate descs within the same
+ // collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists {
+ newDescIDs[desc.id] = struct{}{}
+ collectorID += desc.id
+ }
+
+ // Are all the label names and the help string consistent with
+ // previous descriptors of the same name?
+ // First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ }
+ } else {
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ }
+ } else {
+ newDimHashesByName[desc.fqName] = desc.dimHash
+ }
+ }
+ }
+ // A Collector yielding no Desc at all is considered unchecked.
+ if len(newDescIDs) == 0 {
+ r.uncheckedCollectors = append(r.uncheckedCollectors, c)
+ return nil
+ }
+ if existing, exists := r.collectorsByID[collectorID]; exists {
+ return AlreadyRegisteredError{
+ ExistingCollector: existing,
+ NewCollector: c,
+ }
+ }
+ // If the collectorID is new, but at least one of the descs existed
+ // before, we are in trouble.
+ if duplicateDescErr != nil {
+ return duplicateDescErr
+ }
+
+ // Only after all tests have passed, actually register.
+ r.collectorsByID[collectorID] = c
+ for hash := range newDescIDs {
+ r.descIDs[hash] = struct{}{}
+ }
+ for name, dimHash := range newDimHashesByName {
+ r.dimHashesByName[name] = dimHash
+ }
+ return nil
+}
+
+// Unregister implements Registerer.
+func (r *Registry) Unregister(c Collector) bool {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ descIDs = map[uint64]struct{}{}
+ collectorID uint64 // Just a sum of the desc IDs.
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ for desc := range descChan {
+ if _, exists := descIDs[desc.id]; !exists {
+ collectorID += desc.id
+ descIDs[desc.id] = struct{}{}
+ }
+ }
+
+ r.mtx.RLock()
+ if _, exists := r.collectorsByID[collectorID]; !exists {
+ r.mtx.RUnlock()
+ return false
+ }
+ r.mtx.RUnlock()
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ delete(r.collectorsByID, collectorID)
+ for id := range descIDs {
+ delete(r.descIDs, id)
+ }
+ // dimHashesByName is left untouched as those must be consistent
+ // throughout the lifetime of a program.
+ return true
+}
+
+// MustRegister implements Registerer.
+func (r *Registry) MustRegister(cs ...Collector) {
+ for _, c := range cs {
+ if err := r.Register(c); err != nil {
+ panic(err)
+ }
+ }
+}
+
+// Gather implements Gatherer.
+func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ checkedMetricChan = make(chan Metric, capMetricChan)
+ uncheckedMetricChan = make(chan Metric, capMetricChan)
+ metricHashes = map[uint64]struct{}{}
+ wg sync.WaitGroup
+ errs MultiError // The collected errors to return in the end.
+ registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+ )
+
+ r.mtx.RLock()
+ goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
+ metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+ checkedCollectors := make(chan Collector, len(r.collectorsByID))
+ uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
+ for _, collector := range r.collectorsByID {
+ checkedCollectors <- collector
+ }
+ for _, collector := range r.uncheckedCollectors {
+ uncheckedCollectors <- collector
+ }
+ // In case pedantic checks are enabled, we have to copy the map before
+ // giving up the RLock.
+ if r.pedanticChecksEnabled {
+ registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+ for id := range r.descIDs {
+ registeredDescIDs[id] = struct{}{}
+ }
+ }
+ r.mtx.RUnlock()
+
+ wg.Add(goroutineBudget)
+
+ collectWorker := func() {
+ for {
+ select {
+ case collector := <-checkedCollectors:
+ collector.Collect(checkedMetricChan)
+ case collector := <-uncheckedCollectors:
+ collector.Collect(uncheckedMetricChan)
+ default:
+ return
+ }
+ wg.Done()
+ }
+ }
+
+ // Start the first worker now to make sure at least one is running.
+ go collectWorker()
+ goroutineBudget--
+
+ // Close checkedMetricChan and uncheckedMetricChan once all collectors
+ // are collected.
+ go func() {
+ wg.Wait()
+ close(checkedMetricChan)
+ close(uncheckedMetricChan)
+ }()
+
+ // Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
+ defer func() {
+ if checkedMetricChan != nil {
+ for range checkedMetricChan {
+ }
+ }
+ if uncheckedMetricChan != nil {
+ for range uncheckedMetricChan {
+ }
+ }
+ }()
+
+ // Copy the channel references so we can nil them out later to remove
+ // them from the select statements below.
+ cmc := checkedMetricChan
+ umc := uncheckedMetricChan
+
+ for {
+ select {
+ case metric, ok := <-cmc:
+ if !ok {
+ cmc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ registeredDescIDs,
+ ))
+ case metric, ok := <-umc:
+ if !ok {
+ umc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ nil,
+ ))
+ default:
+ if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
+ // All collectors are already being worked on or
+ // we have already as many goroutines started as
+ // there are collectors. Do the same as above,
+ // just without the default.
+ select {
+ case metric, ok := <-cmc:
+ if !ok {
+ cmc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ registeredDescIDs,
+ ))
+ case metric, ok := <-umc:
+ if !ok {
+ umc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ nil,
+ ))
+ }
+ break
+ }
+ // Start more workers.
+ go collectWorker()
+ goroutineBudget--
+ runtime.Gosched()
+ }
+ // Once both checkedMetricChan and uncheckdMetricChan are closed
+ // and drained, the contraption above will nil out cmc and umc,
+ // and then we can leave the collect loop here.
+ if cmc == nil && umc == nil {
+ break
+ }
+ }
+ return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// processMetric is an internal helper method only used by the Gather method.
+func processMetric(
+ metric Metric,
+ metricFamiliesByName map[string]*dto.MetricFamily,
+ metricHashes map[uint64]struct{},
+ registeredDescIDs map[uint64]struct{},
+) error {
+ desc := metric.Desc()
+ // Wrapped metrics collected by an unchecked Collector can have an
+ // invalid Desc.
+ if desc.err != nil {
+ return desc.err
+ }
+ dtoMetric := &dto.Metric{}
+ if err := metric.Write(dtoMetric); err != nil {
+ return fmt.Errorf("error collecting metric %v: %s", desc, err)
+ }
+ metricFamily, ok := metricFamiliesByName[desc.fqName]
+ if ok { // Existing name.
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+ )
+ }
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch metricFamily.GetType() {
+ case dto.MetricType_COUNTER:
+ if dtoMetric.Counter == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Counter",
+ desc.fqName, dtoMetric,
+ )
+ }
+ case dto.MetricType_GAUGE:
+ if dtoMetric.Gauge == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Gauge",
+ desc.fqName, dtoMetric,
+ )
+ }
+ case dto.MetricType_SUMMARY:
+ if dtoMetric.Summary == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Summary",
+ desc.fqName, dtoMetric,
+ )
+ }
+ case dto.MetricType_UNTYPED:
+ if dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be Untyped",
+ desc.fqName, dtoMetric,
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ if dtoMetric.Histogram == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Histogram",
+ desc.fqName, dtoMetric,
+ )
+ }
+ default:
+ panic("encountered MetricFamily with invalid type")
+ }
+ } else { // New name.
+ metricFamily = &dto.MetricFamily{}
+ metricFamily.Name = proto.String(desc.fqName)
+ metricFamily.Help = proto.String(desc.help)
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch {
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ return fmt.Errorf("empty metric collected: %s", dtoMetric)
+ }
+ if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
+ return err
+ }
+ metricFamiliesByName[desc.fqName] = metricFamily
+ }
+ if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
+ return err
+ }
+ if registeredDescIDs != nil {
+ // Is the desc registered at all?
+ if _, exist := registeredDescIDs[desc.id]; !exist {
+ return fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+ return err
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ return nil
+}
+
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calles are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
+//
+// Gatherers can be used to merge the Gather results from multiple
+// Registries. It also provides a way to directly inject existing MetricFamily
+// protobufs into the gathering by creating a custom Gatherer with a Gather
+// method that simply returns the existing MetricFamily protobufs. Note that no
+// registration is involved (in contrast to Collector registration), so
+// obviously registration-time checks cannot happen. Any inconsistencies between
+// the gathered MetricFamilies are reported as errors by the Gather method, and
+// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
+// (e.g. syntactically invalid metric or label names) will go undetected.
+type Gatherers []Gatherer
+
+// Gather implements Gatherer.
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricFamiliesByName = map[string]*dto.MetricFamily{}
+ metricHashes = map[uint64]struct{}{}
+ errs MultiError // The collected errors to return in the end.
+ )
+
+ for i, g := range gs {
+ mfs, err := g.Gather()
+ if err != nil {
+ if multiErr, ok := err.(MultiError); ok {
+ for _, err := range multiErr {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ } else {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ }
+ for _, mf := range mfs {
+ existingMF, exists := metricFamiliesByName[mf.GetName()]
+ if exists {
+ if existingMF.GetHelp() != mf.GetHelp() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has help %q but should have %q",
+ mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
+ ))
+ continue
+ }
+ if existingMF.GetType() != mf.GetType() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has type %s but should have %s",
+ mf.GetName(), mf.GetType(), existingMF.GetType(),
+ ))
+ continue
+ }
+ } else {
+ existingMF = &dto.MetricFamily{}
+ existingMF.Name = mf.Name
+ existingMF.Help = mf.Help
+ existingMF.Type = mf.Type
+ if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ metricFamiliesByName[mf.GetName()] = existingMF
+ }
+ for _, m := range mf.Metric {
+ if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ existingMF.Metric = append(existingMF.Metric, m)
+ }
+ }
+ }
+ return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// checkSuffixCollisions checks for collisions with the “magic” suffixes the
+// Prometheus text format and the internal metric representation of the
+// Prometheus server add while flattening Summaries and Histograms.
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
+ var (
+ newName = mf.GetName()
+ newType = mf.GetType()
+ newNameWithoutSuffix = ""
+ )
+ switch {
+ case strings.HasSuffix(newName, "_count"):
+ newNameWithoutSuffix = newName[:len(newName)-6]
+ case strings.HasSuffix(newName, "_sum"):
+ newNameWithoutSuffix = newName[:len(newName)-4]
+ case strings.HasSuffix(newName, "_bucket"):
+ newNameWithoutSuffix = newName[:len(newName)-7]
+ }
+ if newNameWithoutSuffix != "" {
+ if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
+ switch existingMF.GetType() {
+ case dto.MetricType_SUMMARY:
+ if !strings.HasSuffix(newName, "_bucket") {
+ return fmt.Errorf(
+ "collected metric named %q collides with previously collected summary named %q",
+ newName, newNameWithoutSuffix,
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ return fmt.Errorf(
+ "collected metric named %q collides with previously collected histogram named %q",
+ newName, newNameWithoutSuffix,
+ )
+ }
+ }
+ }
+ if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
+ if _, ok := mfs[newName+"_count"]; ok {
+ return fmt.Errorf(
+ "collected histogram or summary named %q collides with previously collected metric named %q",
+ newName, newName+"_count",
+ )
+ }
+ if _, ok := mfs[newName+"_sum"]; ok {
+ return fmt.Errorf(
+ "collected histogram or summary named %q collides with previously collected metric named %q",
+ newName, newName+"_sum",
+ )
+ }
+ }
+ if newType == dto.MetricType_HISTOGRAM {
+ if _, ok := mfs[newName+"_bucket"]; ok {
+ return fmt.Errorf(
+ "collected histogram named %q collides with previously collected metric named %q",
+ newName, newName+"_bucket",
+ )
+ }
+ }
+ return nil
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes.
+func checkMetricConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ metricHashes map[uint64]struct{},
+) error {
+ name := metricFamily.GetName()
+
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %q { %s} is not a %s",
+ name, dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ previousLabelName := ""
+ for _, labelPair := range dtoMetric.GetLabel() {
+ labelName := labelPair.GetName()
+ if labelName == previousLabelName {
+ return fmt.Errorf(
+ "collected metric %q { %s} has two or more labels with the same name: %s",
+ name, dtoMetric, labelName,
+ )
+ }
+ if !checkLabelName(labelName) {
+ return fmt.Errorf(
+ "collected metric %q { %s} has a label with an invalid name: %s",
+ name, dtoMetric, labelName,
+ )
+ }
+ if dtoMetric.Summary != nil && labelName == quantileLabel {
+ return fmt.Errorf(
+ "collected metric %q { %s} must not have an explicit %q label",
+ name, dtoMetric, quantileLabel,
+ )
+ }
+ if !utf8.ValidString(labelPair.GetValue()) {
+ return fmt.Errorf(
+ "collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
+ name, dtoMetric, labelName, labelPair.GetValue())
+ }
+ previousLabelName = labelName
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same labels)?
+ h := hashNew()
+ h = hashAdd(h, name)
+ h = hashAddByte(h, separatorByte)
+ // Make sure label pairs are sorted. We depend on it for the consistency
+ // check.
+ sort.Sort(labelPairSorter(dtoMetric.Label))
+ for _, lp := range dtoMetric.Label {
+ h = hashAdd(h, lp.GetName())
+ h = hashAddByte(h, separatorByte)
+ h = hashAdd(h, lp.GetValue())
+ h = hashAddByte(h, separatorByte)
+ }
+ if _, exists := metricHashes[h]; exists {
+ return fmt.Errorf(
+ "collected metric %q { %s} was collected before with the same name and label values",
+ name, dtoMetric,
+ )
+ }
+ metricHashes[h] = struct{}{}
+ return nil
+}
+
+func checkDescConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ desc *Desc,
+) error {
+ // Desc help consistency with metric family help.
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+ )
+ }
+
+ // Is the desc consistent with the content of the metric?
+ lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
+ lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+ for _, l := range desc.variableLabels {
+ lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+ Name: proto.String(l),
+ })
+ }
+ if len(lpsFromDesc) != len(dtoMetric.Label) {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ sort.Sort(labelPairSorter(lpsFromDesc))
+ for i, lpFromDesc := range lpsFromDesc {
+ lpFromMetric := dtoMetric.Label[i]
+ if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+ lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ }
+ return nil
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 00000000000..f7e92d82945
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,626 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/beorn7/perks/quantile"
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations. However, the default behavior will change in the
+// upcoming v0.10 of the library. There will be no rank estimations at all by
+// default. For a sane transition, it is recommended to set the desired rank
+// estimations explicitly.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the summary.
+ Observe(float64)
+}
+
+// DefObjectives are the default Summary quantile values.
+//
+// Deprecated: DefObjectives will not be used as the default objectives in
+// v0.10 of the library. The default Summary will have no quantiles then.
+var (
+ DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+ errQuantileLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in summaries", quantileLabel,
+ )
+)
+
+// Default values for SummaryOpts.
+const (
+ // DefMaxAge is the default duration for which observations stay
+ // relevant.
+ DefMaxAge time.Duration = 10 * time.Minute
+ // DefAgeBuckets is the default number of buckets used to calculate the
+ // age of observations.
+ DefAgeBuckets = 5
+ // DefBufCap is the standard buffer size for collecting Summary observations.
+ DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name to a non-empty string. While all other fields are
+// optional and can safely be left at their zero value, it is recommended to set
+// a help string and to explicitly set the Objectives field to the desired value
+// as the default value will change in the upcoming v0.10 of the library.
+type SummaryOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Summary (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Summary must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Summary.
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // Due to the way a Summary is represented in the Prometheus text format
+ // and how it is handled by the Prometheus server internally, “quantile”
+ // is an illegal label name. Construction of a Summary or SummaryVec
+ // will panic if this label name is used in ConstLabels.
+ //
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+ ConstLabels Labels
+
+ // Objectives defines the quantile rank estimates with their respective
+ // absolute error. If Objectives[q] = e, then the value reported for q
+ // will be the φ-quantile value for some φ between q-e and q+e. The
+ // default value is DefObjectives. It is used if Objectives is left at
+ // its zero value (i.e. nil). To create a Summary without Objectives,
+ // set it to an empty map (i.e. map[float64]float64{}).
+ //
+ // Deprecated: Note that the current value of DefObjectives is
+ // deprecated. It will be replaced by an empty map in v0.10 of the
+ // library. Please explicitly set Objectives to the desired value.
+ Objectives map[float64]float64
+
+ // MaxAge defines the duration for which an observation stays relevant
+ // for the summary. Must be positive. The default value is DefMaxAge.
+ MaxAge time.Duration
+
+ // AgeBuckets is the number of buckets used to exclude observations that
+ // are older than MaxAge from the summary. A higher number has a
+ // resource penalty, so only increase it if the higher resolution is
+ // really required. For very high observation rates, you might want to
+ // reduce the number of age buckets. With only one age bucket, you will
+ // effectively see a complete reset of the summary each time MaxAge has
+ // passed. The default value is DefAgeBuckets.
+ AgeBuckets uint32
+
+ // BufCap defines the default sample stream buffer size. The default
+ // value of DefBufCap should suffice for most uses. If there is a need
+ // to increase the value, a multiple of 500 is recommended (because that
+ // is the internal buffer size of the underlying package
+ // "github.com/bmizerany/perks/quantile").
+ BufCap uint32
+}
+
+// Great fuck-up with the sliding-window decay algorithm... The Merge method of
+// perk/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+ return newSummary(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+
+ if opts.Objectives == nil {
+ opts.Objectives = DefObjectives
+ }
+
+ if opts.MaxAge < 0 {
+ panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+ }
+ if opts.MaxAge == 0 {
+ opts.MaxAge = DefMaxAge
+ }
+
+ if opts.AgeBuckets == 0 {
+ opts.AgeBuckets = DefAgeBuckets
+ }
+
+ if opts.BufCap == 0 {
+ opts.BufCap = DefBufCap
+ }
+
+ s := &summary{
+ desc: desc,
+
+ objectives: opts.Objectives,
+ sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+ labelPairs: makeLabelPairs(desc, labelValues),
+
+ hotBuf: make([]float64, 0, opts.BufCap),
+ coldBuf: make([]float64, 0, opts.BufCap),
+ streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+ }
+ s.headStreamExpTime = time.Now().Add(s.streamDuration)
+ s.hotBufExpTime = s.headStreamExpTime
+
+ for i := uint32(0); i < opts.AgeBuckets; i++ {
+ s.streams = append(s.streams, s.newStream())
+ }
+ s.headStream = s.streams[0]
+
+ for qu := range s.objectives {
+ s.sortedObjectives = append(s.sortedObjectives, qu)
+ }
+ sort.Float64s(s.sortedObjectives)
+
+ s.init(s) // Init self-collection.
+ return s
+}
+
+type summary struct {
+ selfCollector
+
+ bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+ mtx sync.Mutex // Protects every other moving part.
+ // Lock bufMtx before mtx if both are needed.
+
+ desc *Desc
+
+ objectives map[float64]float64
+ sortedObjectives []float64
+
+ labelPairs []*dto.LabelPair
+
+ sum float64
+ cnt uint64
+
+ hotBuf, coldBuf []float64
+
+ streams []*quantile.Stream
+ streamDuration time.Duration
+ headStream *quantile.Stream
+ headStreamIdx int
+ headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+ s.bufMtx.Lock()
+ defer s.bufMtx.Unlock()
+
+ now := time.Now()
+ if now.After(s.hotBufExpTime) {
+ s.asyncFlush(now)
+ }
+ s.hotBuf = append(s.hotBuf, v)
+ if len(s.hotBuf) == cap(s.hotBuf) {
+ s.asyncFlush(now)
+ }
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+ s.bufMtx.Lock()
+ s.mtx.Lock()
+ // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+ s.swapBufs(time.Now())
+ s.bufMtx.Unlock()
+
+ s.flushColdBuf()
+ sum.SampleCount = proto.Uint64(s.cnt)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for _, rank := range s.sortedObjectives {
+ var q float64
+ if s.headStream.Count() == 0 {
+ q = math.NaN()
+ } else {
+ q = s.headStream.Query(rank)
+ }
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ s.mtx.Unlock()
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+ return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+ return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+ s.mtx.Lock()
+ s.swapBufs(now)
+
+ // Unblock the original goroutine that was responsible for the mutation
+ // that triggered the compaction. But hold onto the global non-buffer
+ // state mutex until the operation finishes.
+ go func() {
+ s.flushColdBuf()
+ s.mtx.Unlock()
+ }()
+}
+
+// rotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+ for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+ s.headStream.Reset()
+ s.headStreamIdx++
+ if s.headStreamIdx >= len(s.streams) {
+ s.headStreamIdx = 0
+ }
+ s.headStream = s.streams[s.headStreamIdx]
+ s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+ }
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+ for _, v := range s.coldBuf {
+ for _, stream := range s.streams {
+ stream.Insert(v)
+ }
+ s.cnt++
+ s.sum += v
+ }
+ s.coldBuf = s.coldBuf[0:0]
+ s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+ if len(s.coldBuf) != 0 {
+ panic("coldBuf is not empty")
+ }
+ s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+ // hotBuf is now empty and gets new expiration set.
+ for now.After(s.hotBufExpTime) {
+ s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+ }
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+ return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+ return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+ *metricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names.
+//
+// Due to the way a Summary is represented in the Prometheus text format and how
+// it is handled by the Prometheus server internally, “quantile” is an illegal
+// label name. NewSummaryVec will panic if this label name is used.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ for _, ln := range labelNames {
+ if ln == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &SummaryVec{
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newSummary(desc, opts, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues returns the Summary for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Summary is created.
+//
+// It is possible to call this method without using the returned Summary to only
+// create the new Summary but leave it at its starting value, a Summary without
+// any observations.
+//
+// Keeping the Summary for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Summary from the SummaryVec. In that case,
+// the Summary will still exist, but it will not be exported anymore, even if a
+// Summary with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
+ if metric != nil {
+ return metric.(Observer), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
+ s, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
+
+// With works as GetMetricWith, but panics where GetMetricWithLabels would have
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *SummaryVec) With(labels Labels) Observer {
+ s, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the SummaryVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &SummaryVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
+}
+
+type constSummary struct {
+ desc *Desc
+ count uint64
+ sum float64
+ quantiles map[float64]float64
+ labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+ sum.SampleCount = proto.Uint64(s.count)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for rank, q := range s.quantiles {
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc or if Desc is invalid.
+func NewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ return nil, err
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 00000000000..b8fc5f18c85
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+ begin time.Time
+ observer Observer
+}
+
+// NewTimer creates a new Timer. The provided Observer is used to observe a
+// duration in seconds. Timer is usually used to time a function call in the
+// following way:
+// func TimeMe() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDuration()
+// // Do actual work.
+// }
+func NewTimer(o Observer) *Timer {
+ return &Timer{
+ begin: time.Now(),
+ observer: o,
+ }
+}
+
+// ObserveDuration records the duration passed since the Timer was created with
+// NewTimer. It calls the Observe method of the Observer provided during
+// construction with the duration in seconds as an argument. ObserveDuration is
+// usually called with a defer statement.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func (t *Timer) ObserveDuration() {
+ if t.observer != nil {
+ t.observer.Observe(time.Since(t.begin).Seconds())
+ }
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 00000000000..0f9ce63f409
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,42 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// UntypedFunc works like GaugeFunc but the collected metric is of type
+// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
+// type.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+ Metric
+ Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, function)
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/value.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 00000000000..eb248f10874
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,162 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+ _ ValueType = iota
+ CounterValue
+ GaugeValue
+ UntypedValue
+)
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+ selfCollector
+
+ desc *Desc
+ valType ValueType
+ function func() float64
+ labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+ result := &valueFunc{
+ desc: desc,
+ valType: valueType,
+ function: function,
+ labelPairs: makeLabelPairs(desc, nil),
+ }
+ result.init(result)
+ return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+ return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+ return populateMetric(v.valType, v.function(), v.labelPairs, out)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send it to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc or if Desc is
+// invalid.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ return nil, err
+ }
+ return &constMetric{
+ desc: desc,
+ valType: valueType,
+ val: value,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+ m, err := NewConstMetric(desc, valueType, value, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type constMetric struct {
+ desc *Desc
+ valType ValueType
+ val float64
+ labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+ return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+ return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
+func populateMetric(
+ t ValueType,
+ v float64,
+ labelPairs []*dto.LabelPair,
+ m *dto.Metric,
+) error {
+ m.Label = labelPairs
+ switch t {
+ case CounterValue:
+ m.Counter = &dto.Counter{Value: proto.Float64(v)}
+ case GaugeValue:
+ m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+ case UntypedValue:
+ m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+ default:
+ return fmt.Errorf("encountered unknown type %v", t)
+ }
+ return nil
+}
+
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+ totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+ if totalLen == 0 {
+ // Super fast path.
+ return nil
+ }
+ if len(desc.variableLabels) == 0 {
+ // Moderately fast path.
+ return desc.constLabelPairs
+ }
+ labelPairs := make([]*dto.LabelPair, 0, totalLen)
+ for i, n := range desc.variableLabels {
+ labelPairs = append(labelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(labelValues[i]),
+ })
+ }
+ labelPairs = append(labelPairs, desc.constLabelPairs...)
+ sort.Sort(labelPairSorter(labelPairs))
+ return labelPairs
+}
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 00000000000..14ed9e856d1
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,472 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/prometheus/common/model"
+)
+
+// metricVec is a Collector to bundle metrics of the same name that differ in
+// their label values. metricVec is not used directly (and therefore
+// unexported). It is used as a building block for implementations of vectors of
+// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
+// It also handles label currying. It uses basicMetricVec internally.
+type metricVec struct {
+ *metricMap
+
+ curry []curriedLabelValue
+
+ // hashAdd and hashAddByte can be replaced for testing collision handling.
+ hashAdd func(h uint64, s string) uint64
+ hashAddByte func(h uint64, b byte) uint64
+}
+
+// newMetricVec returns an initialized metricVec.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
+ return &metricVec{
+ metricMap: &metricMap{
+ metrics: map[uint64][]metricWithLabelValues{},
+ desc: desc,
+ newMetric: newMetric,
+ },
+ hashAdd: hashAdd,
+ hashAddByte: hashAddByte,
+ }
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
+
+ return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *metricVec) Delete(labels Labels) bool {
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
+
+ return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
+}
+
+func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
+ var (
+ newCurry []curriedLabelValue
+ oldCurry = m.curry
+ iCurry int
+ )
+ for i, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
+ if ok {
+ return nil, fmt.Errorf("label name %q is already curried", label)
+ }
+ newCurry = append(newCurry, oldCurry[iCurry])
+ iCurry++
+ } else {
+ if !ok {
+ continue // Label stays uncurried.
+ }
+ newCurry = append(newCurry, curriedLabelValue{i, val})
+ }
+ }
+ if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
+ return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
+ }
+
+ return &metricVec{
+ metricMap: m.metricMap,
+ curry: newCurry,
+ hashAdd: m.hashAdd,
+ hashAddByte: m.hashAddByte,
+ }, nil
+}
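+
+// Hedged usage sketch: curryWith is reached through the exported CurryWith /
+// MustCurryWith methods on the concrete vector types. The metric and label
+// names below are assumptions for illustration:
+//
+//	base := NewCounterVec(CounterOpts{Name: "http_requests_total", Help: "..."}, []string{"method", "code"})
+//	getOnly := base.MustCurryWith(Labels{"method": "GET"})
+//	getOnly.WithLabelValues("200").Inc() // only "code" remains variable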
+
+func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
+}
+
+func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
+}
+
+func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
+ if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+ return 0, err
+ }
+
+ var (
+ h = hashNew()
+ curry = m.curry
+ iVals, iCurry int
+ )
+ for i := 0; i < len(m.desc.variableLabels); i++ {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ h = m.hashAdd(h, curry[iCurry].value)
+ iCurry++
+ } else {
+ h = m.hashAdd(h, vals[iVals])
+ iVals++
+ }
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
+
+func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
+ if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+ return 0, err
+ }
+
+ var (
+ h = hashNew()
+ curry = m.curry
+ iCurry int
+ )
+ for i, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ if ok {
+ return 0, fmt.Errorf("label name %q is already curried", label)
+ }
+ h = m.hashAdd(h, curry[iCurry].value)
+ iCurry++
+ } else {
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", label)
+ }
+ h = m.hashAdd(h, val)
+ }
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
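+
+// Worked example (illustrative, not from upstream docs): with variableLabels
+// ["method", "code"] and "method" curried to "GET",
+// hashLabelValues([]string{"200"}) feeds the FNV-1a state (hashNew/hashAdd)
+// the sequence
+//
+//	"GET", SeparatorByte, "200", SeparatorByte
+//
+// Curried and supplied values are therefore always hashed in declaration
+// order, and the separator byte keeps ("ab", "c") distinct from ("a", "bc").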
+
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+ values []string
+ metric Metric
+}
+
+// curriedLabelValue holds the curried value for a label at the given index.
+type curriedLabelValue struct {
+ index int
+ value string
+}
+
+// metricMap is a helper for metricVec and shared between differently curried
+// metricVecs.
+type metricMap struct {
+ mtx sync.RWMutex // Protects metrics.
+ metrics map[uint64][]metricWithLabelValues
+ desc *Desc
+ newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. It will send exactly one Desc to the provided
+// channel.
+func (m *metricMap) Describe(ch chan<- *Desc) {
+ ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *metricMap) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metrics := range m.metrics {
+ for _, metric := range metrics {
+ ch <- metric.metric
+ }
+ }
+}
+
+// Reset deletes all metrics in this vector.
+func (m *metricMap) Reset() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ for h := range m.metrics {
+ delete(m.metrics, h)
+ }
+}
+
+// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
+// there are multiple matches in the bucket, use lvs to select a metric and
+// remove only that metric.
+func (m *metricMap) deleteByHashWithLabelValues(
+ h uint64, lvs []string, curry []curriedLabelValue,
+) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ metrics, ok := m.metrics[h]
+ if !ok {
+ return false
+ }
+
+ i := findMetricWithLabelValues(metrics, lvs, curry)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+ } else {
+ delete(m.metrics, h)
+ }
+ return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use labels to select a metric and remove
+// only that metric.
+func (m *metricMap) deleteByHashWithLabels(
+ h uint64, labels Labels, curry []curriedLabelValue,
+) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ metrics, ok := m.metrics[h]
+ if !ok {
+ return false
+ }
+ i := findMetricWithLabels(m.desc, metrics, labels, curry)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+ } else {
+ delete(m.metrics, h)
+ }
+ return true
+}
+
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label
+// values, or creates it and returns the new one.
+//
+// This function manages its own locking: it takes the read lock for the
+// lookup and upgrades to the write lock only if the metric must be created.
+func (m *metricMap) getOrCreateMetricWithLabelValues(
+ hash uint64, lvs []string, curry []curriedLabelValue,
+) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
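+	// Not found under the read lock. Take the write lock and look up again:
+	// another goroutine may have created the metric in between (classic
+	// double-checked locking).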
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+ if !ok {
+ inlinedLVs := inlineLabelValues(lvs, curry)
+ metric = m.newMetric(inlinedLVs...)
+ m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
+ }
+ return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and labels, or
+// creates it and returns the new one.
+//
+// This function manages its own locking: it takes the read lock for the
+// lookup and upgrades to the write lock only if the metric must be created.
+func (m *metricMap) getOrCreateMetricWithLabels(
+ hash uint64, labels Labels, curry []curriedLabelValue,
+) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
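+	// Same double-checked locking as in getOrCreateMetricWithLabelValues.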
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
+ if !ok {
+ lvs := extractLabelValues(m.desc, labels, curry)
+ metric = m.newMetric(lvs...)
+ m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
+ }
+ return metric
+}
+
+// getMetricWithHashAndLabelValues gets a metric while handling possible
+// collisions in the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabelValues(
+ h uint64, lvs []string, curry []curriedLabelValue,
+) (Metric, bool) {
+ metrics, ok := m.metrics[h]
+ if ok {
+ if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// getMetricWithHashAndLabels gets a metric while handling possible collisions
+// in the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabels(
+ h uint64, labels Labels, curry []curriedLabelValue,
+) (Metric, bool) {
+ metrics, ok := m.metrics[h]
+ if ok {
+ if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
+func findMetricWithLabelValues(
+ metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
+) int {
+ for i, metric := range metrics {
+ if matchLabelValues(metric.values, lvs, curry) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
+// if not found.
+func findMetricWithLabels(
+ desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
+ for i, metric := range metrics {
+ if matchLabels(desc, metric.values, labels, curry) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
+ if len(values) != len(lvs)+len(curry) {
+ return false
+ }
+ var iLVs, iCurry int
+ for i, v := range values {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ if v != curry[iCurry].value {
+ return false
+ }
+ iCurry++
+ continue
+ }
+ if v != lvs[iLVs] {
+ return false
+ }
+ iLVs++
+ }
+ return true
+}
+
+func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+ if len(values) != len(labels)+len(curry) {
+ return false
+ }
+ iCurry := 0
+ for i, k := range desc.variableLabels {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ if values[i] != curry[iCurry].value {
+ return false
+ }
+ iCurry++
+ continue
+ }
+ if values[i] != labels[k] {
+ return false
+ }
+ }
+ return true
+}
+
+func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
+ labelValues := make([]string, len(labels)+len(curry))
+ iCurry := 0
+ for i, k := range desc.variableLabels {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ labelValues[i] = curry[iCurry].value
+ iCurry++
+ continue
+ }
+ labelValues[i] = labels[k]
+ }
+ return labelValues
+}
+
+func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
+ labelValues := make([]string, len(lvs)+len(curry))
+ var iCurry, iLVs int
+ for i := range labelValues {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ labelValues[i] = curry[iCurry].value
+ iCurry++
+ continue
+ }
+ labelValues[i] = lvs[iLVs]
+ iLVs++
+ }
+ return labelValues
+}
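+
+// Worked example (illustrative): with curry = []curriedLabelValue{{index: 0, value: "GET"}},
+// inlineLabelValues([]string{"200"}, curry) returns []string{"GET", "200"}:
+// curried values are spliced back into their declared positions, so
+// metricWithLabelValues always stores the complete, ordered value list.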
diff --git a/agent/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/agent/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
new file mode 100644
index 00000000000..49159bf3eb0
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -0,0 +1,179 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// WrapRegistererWith returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels.
+//
+// WrapRegistererWith provides a way to add fixed labels to a subset of
+// Collectors. It should not be used to add fixed labels to all metrics exposed.
+//
+// The Collector example demonstrates a use of WrapRegistererWith.
+func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
+ return &wrappingRegisterer{
+ wrappedRegisterer: reg,
+ labels: labels,
+ }
+}
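+
+// Hedged usage sketch (the registry, label values, and taskCounter collector
+// are assumptions for the example): every metric registered through the
+// wrapper carries the extra label as if it were a ConstLabel.
+//
+//	reg := NewRegistry()
+//	wrapped := WrapRegistererWith(Labels{"component": "engine"}, reg)
+//	wrapped.MustRegister(taskCounter) // taskCounter's metrics now carry component="engine"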
+
+// WrapRegistererWithPrefix returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided prefix to the name of all Metrics it collects.
+//
+// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
+// a sub-system. To make this work, register metrics of the sub-system with the
+// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
+// to use the same prefix for all metrics exposed. In particular, do not prefix
+// metric names that are standardized across applications, as that would break
+// horizontal monitoring, for example the metrics provided by the Go collector
+// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
+// fact, those metrics are already prefixed with “go_” or “process_”,
+// respectively.)
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
+ return &wrappingRegisterer{
+ wrappedRegisterer: reg,
+ prefix: prefix,
+ }
+}
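+
+// Hedged usage sketch (the prefix and requestCount collector are assumptions
+// for the example): the prefix is applied at registration time only; the
+// wrapped Collector itself is left untouched.
+//
+//	reg := NewRegistry()
+//	sub := WrapRegistererWithPrefix("ecs_agent_", reg)
+//	sub.MustRegister(requestCount) // exposed as "ecs_agent_<name>" on reg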
+
+type wrappingRegisterer struct {
+ wrappedRegisterer Registerer
+ prefix string
+ labels Labels
+}
+
+func (r *wrappingRegisterer) Register(c Collector) error {
+ return r.wrappedRegisterer.Register(&wrappingCollector{
+ wrappedCollector: c,
+ prefix: r.prefix,
+ labels: r.labels,
+ })
+}
+
+func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
+ for _, c := range cs {
+ if err := r.Register(c); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func (r *wrappingRegisterer) Unregister(c Collector) bool {
+ return r.wrappedRegisterer.Unregister(&wrappingCollector{
+ wrappedCollector: c,
+ prefix: r.prefix,
+ labels: r.labels,
+ })
+}
+
+type wrappingCollector struct {
+ wrappedCollector Collector
+ prefix string
+ labels Labels
+}
+
+func (c *wrappingCollector) Collect(ch chan<- Metric) {
+ wrappedCh := make(chan Metric)
+ go func() {
+ c.wrappedCollector.Collect(wrappedCh)
+ close(wrappedCh)
+ }()
+ for m := range wrappedCh {
+ ch <- &wrappingMetric{
+ wrappedMetric: m,
+ prefix: c.prefix,
+ labels: c.labels,
+ }
+ }
+}
+
+func (c *wrappingCollector) Describe(ch chan<- *Desc) {
+ wrappedCh := make(chan *Desc)
+ go func() {
+ c.wrappedCollector.Describe(wrappedCh)
+ close(wrappedCh)
+ }()
+ for desc := range wrappedCh {
+ ch <- wrapDesc(desc, c.prefix, c.labels)
+ }
+}
+
+type wrappingMetric struct {
+ wrappedMetric Metric
+ prefix string
+ labels Labels
+}
+
+func (m *wrappingMetric) Desc() *Desc {
+ return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
+}
+
+func (m *wrappingMetric) Write(out *dto.Metric) error {
+ if err := m.wrappedMetric.Write(out); err != nil {
+ return err
+ }
+ if len(m.labels) == 0 {
+ // No wrapping labels.
+ return nil
+ }
+ for ln, lv := range m.labels {
+ out.Label = append(out.Label, &dto.LabelPair{
+ Name: proto.String(ln),
+ Value: proto.String(lv),
+ })
+ }
+ sort.Sort(labelPairSorter(out.Label))
+ return nil
+}
+
+func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
+ constLabels := Labels{}
+ for _, lp := range desc.constLabelPairs {
+ constLabels[*lp.Name] = *lp.Value
+ }
+ for ln, lv := range labels {
+ if _, alreadyUsed := constLabels[ln]; alreadyUsed {
+ return &Desc{
+ fqName: desc.fqName,
+ help: desc.help,
+ variableLabels: desc.variableLabels,
+ constLabelPairs: desc.constLabelPairs,
+ err: fmt.Errorf("attempted wrapping with already existing label name %q", ln),
+ }
+ }
+ constLabels[ln] = lv
+ }
+ // NewDesc will do remaining validations.
+ newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+	// Propagate errors if there were any. This will override any error
+	// created by NewDesc above, i.e. earlier errors take precedence.
+ if desc.err != nil {
+ newDesc.err = desc.err
+ }
+ return newDesc
+}
diff --git a/agent/vendor/github.com/prometheus/client_model/LICENSE b/agent/vendor/github.com/prometheus/client_model/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_model/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/agent/vendor/github.com/prometheus/client_model/NOTICE b/agent/vendor/github.com/prometheus/client_model/NOTICE
new file mode 100644
index 00000000000..20110e410e5
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_model/NOTICE
@@ -0,0 +1,5 @@
+Data model artifacts for Prometheus.
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/agent/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/agent/vendor/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 00000000000..9805432c2a4
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,629 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: metrics.proto
+
+package io_prometheus_client // import "github.com/prometheus/client_model/go"
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type MetricType int32
+
+const (
+ MetricType_COUNTER MetricType = 0
+ MetricType_GAUGE MetricType = 1
+ MetricType_SUMMARY MetricType = 2
+ MetricType_UNTYPED MetricType = 3
+ MetricType_HISTOGRAM MetricType = 4
+)
+
+var MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+}
+var MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+}
+
+func (x MetricType) Enum() *MetricType {
+ p := new(MetricType)
+ *p = x
+ return p
+}
+func (x MetricType) String() string {
+ return proto.EnumName(MetricType_name, int32(x))
+}
+func (x *MetricType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+ if err != nil {
+ return err
+ }
+ *x = MetricType(value)
+ return nil
+}
+func (MetricType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+}
+
+type LabelPair struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LabelPair) Reset() { *m = LabelPair{} }
+func (m *LabelPair) String() string { return proto.CompactTextString(m) }
+func (*LabelPair) ProtoMessage() {}
+func (*LabelPair) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+}
+func (m *LabelPair) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LabelPair.Unmarshal(m, b)
+}
+func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
+}
+func (dst *LabelPair) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelPair.Merge(dst, src)
+}
+func (m *LabelPair) XXX_Size() int {
+ return xxx_messageInfo_LabelPair.Size(m)
+}
+func (m *LabelPair) XXX_DiscardUnknown() {
+ xxx_messageInfo_LabelPair.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelPair proto.InternalMessageInfo
+
+func (m *LabelPair) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *LabelPair) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Gauge struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Gauge) Reset() { *m = Gauge{} }
+func (m *Gauge) String() string { return proto.CompactTextString(m) }
+func (*Gauge) ProtoMessage() {}
+func (*Gauge) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1}
+}
+func (m *Gauge) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Gauge.Unmarshal(m, b)
+}
+func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
+}
+func (dst *Gauge) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Gauge.Merge(dst, src)
+}
+func (m *Gauge) XXX_Size() int {
+ return xxx_messageInfo_Gauge.Size(m)
+}
+func (m *Gauge) XXX_DiscardUnknown() {
+ xxx_messageInfo_Gauge.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Gauge proto.InternalMessageInfo
+
+func (m *Gauge) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Counter struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Counter) Reset() { *m = Counter{} }
+func (m *Counter) String() string { return proto.CompactTextString(m) }
+func (*Counter) ProtoMessage() {}
+func (*Counter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2}
+}
+func (m *Counter) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Counter.Unmarshal(m, b)
+}
+func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
+}
+func (dst *Counter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Counter.Merge(dst, src)
+}
+func (m *Counter) XXX_Size() int {
+ return xxx_messageInfo_Counter.Size(m)
+}
+func (m *Counter) XXX_DiscardUnknown() {
+ xxx_messageInfo_Counter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Counter proto.InternalMessageInfo
+
+func (m *Counter) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Quantile struct {
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Quantile) Reset() { *m = Quantile{} }
+func (m *Quantile) String() string { return proto.CompactTextString(m) }
+func (*Quantile) ProtoMessage() {}
+func (*Quantile) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3}
+}
+func (m *Quantile) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Quantile.Unmarshal(m, b)
+}
+func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
+}
+func (dst *Quantile) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Quantile.Merge(dst, src)
+}
+func (m *Quantile) XXX_Size() int {
+ return xxx_messageInfo_Quantile.Size(m)
+}
+func (m *Quantile) XXX_DiscardUnknown() {
+ xxx_messageInfo_Quantile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Quantile proto.InternalMessageInfo
+
+func (m *Quantile) GetQuantile() float64 {
+ if m != nil && m.Quantile != nil {
+ return *m.Quantile
+ }
+ return 0
+}
+
+func (m *Quantile) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Summary struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Summary) Reset() { *m = Summary{} }
+func (m *Summary) String() string { return proto.CompactTextString(m) }
+func (*Summary) ProtoMessage() {}
+func (*Summary) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4}
+}
+func (m *Summary) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Summary.Unmarshal(m, b)
+}
+func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
+}
+func (dst *Summary) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Summary.Merge(dst, src)
+}
+func (m *Summary) XXX_Size() int {
+ return xxx_messageInfo_Summary.Size(m)
+}
+func (m *Summary) XXX_DiscardUnknown() {
+ xxx_messageInfo_Summary.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Summary proto.InternalMessageInfo
+
+func (m *Summary) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Summary) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Summary) GetQuantile() []*Quantile {
+ if m != nil {
+ return m.Quantile
+ }
+ return nil
+}
+
+type Untyped struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Untyped) Reset() { *m = Untyped{} }
+func (m *Untyped) String() string { return proto.CompactTextString(m) }
+func (*Untyped) ProtoMessage() {}
+func (*Untyped) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5}
+}
+func (m *Untyped) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Untyped.Unmarshal(m, b)
+}
+func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
+}
+func (dst *Untyped) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Untyped.Merge(dst, src)
+}
+func (m *Untyped) XXX_Size() int {
+ return xxx_messageInfo_Untyped.Size(m)
+}
+func (m *Untyped) XXX_DiscardUnknown() {
+ xxx_messageInfo_Untyped.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Untyped proto.InternalMessageInfo
+
+func (m *Untyped) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Histogram struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (m *Histogram) String() string { return proto.CompactTextString(m) }
+func (*Histogram) ProtoMessage() {}
+func (*Histogram) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6}
+}
+func (m *Histogram) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Histogram.Unmarshal(m, b)
+}
+func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
+}
+func (dst *Histogram) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Histogram.Merge(dst, src)
+}
+func (m *Histogram) XXX_Size() int {
+ return xxx_messageInfo_Histogram.Size(m)
+}
+func (m *Histogram) XXX_DiscardUnknown() {
+ xxx_messageInfo_Histogram.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Histogram proto.InternalMessageInfo
+
+func (m *Histogram) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Histogram) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Histogram) GetBucket() []*Bucket {
+ if m != nil {
+ return m.Bucket
+ }
+ return nil
+}
+
+type Bucket struct {
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Bucket) Reset() { *m = Bucket{} }
+func (m *Bucket) String() string { return proto.CompactTextString(m) }
+func (*Bucket) ProtoMessage() {}
+func (*Bucket) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7}
+}
+func (m *Bucket) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Bucket.Unmarshal(m, b)
+}
+func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
+}
+func (dst *Bucket) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Bucket.Merge(dst, src)
+}
+func (m *Bucket) XXX_Size() int {
+ return xxx_messageInfo_Bucket.Size(m)
+}
+func (m *Bucket) XXX_DiscardUnknown() {
+ xxx_messageInfo_Bucket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Bucket proto.InternalMessageInfo
+
+func (m *Bucket) GetCumulativeCount() uint64 {
+ if m != nil && m.CumulativeCount != nil {
+ return *m.CumulativeCount
+ }
+ return 0
+}
+
+func (m *Bucket) GetUpperBound() float64 {
+ if m != nil && m.UpperBound != nil {
+ return *m.UpperBound
+ }
+ return 0
+}
+
+type Metric struct {
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+func (*Metric) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8}
+}
+func (m *Metric) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Metric.Unmarshal(m, b)
+}
+func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
+}
+func (dst *Metric) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Metric.Merge(dst, src)
+}
+func (m *Metric) XXX_Size() int {
+ return xxx_messageInfo_Metric.Size(m)
+}
+func (m *Metric) XXX_DiscardUnknown() {
+ xxx_messageInfo_Metric.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metric proto.InternalMessageInfo
+
+func (m *Metric) GetLabel() []*LabelPair {
+ if m != nil {
+ return m.Label
+ }
+ return nil
+}
+
+func (m *Metric) GetGauge() *Gauge {
+ if m != nil {
+ return m.Gauge
+ }
+ return nil
+}
+
+func (m *Metric) GetCounter() *Counter {
+ if m != nil {
+ return m.Counter
+ }
+ return nil
+}
+
+func (m *Metric) GetSummary() *Summary {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func (m *Metric) GetUntyped() *Untyped {
+ if m != nil {
+ return m.Untyped
+ }
+ return nil
+}
+
+func (m *Metric) GetHistogram() *Histogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+func (m *Metric) GetTimestampMs() int64 {
+ if m != nil && m.TimestampMs != nil {
+ return *m.TimestampMs
+ }
+ return 0
+}
+
+type MetricFamily struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MetricFamily) Reset() { *m = MetricFamily{} }
+func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
+func (*MetricFamily) ProtoMessage() {}
+func (*MetricFamily) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9}
+}
+func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
+}
+func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
+}
+func (dst *MetricFamily) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MetricFamily.Merge(dst, src)
+}
+func (m *MetricFamily) XXX_Size() int {
+ return xxx_messageInfo_MetricFamily.Size(m)
+}
+func (m *MetricFamily) XXX_DiscardUnknown() {
+ xxx_messageInfo_MetricFamily.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
+
+func (m *MetricFamily) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetHelp() string {
+ if m != nil && m.Help != nil {
+ return *m.Help
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetType() MetricType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return MetricType_COUNTER
+}
+
+func (m *MetricFamily) GetMetric() []*Metric {
+ if m != nil {
+ return m.Metric
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
+ proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
+ proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
+ proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
+ proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
+ proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
+ proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
+ proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
+ proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
+ proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
+ proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
+}
+
+func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) }
+
+var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{
+ // 591 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e,
+ 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89,
+ 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81,
+ 0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47,
+ 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77,
+ 0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e,
+ 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64,
+ 0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58,
+ 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c,
+ 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2,
+ 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4,
+ 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12,
+ 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c,
+ 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee,
+ 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f,
+ 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54,
+ 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea,
+ 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63,
+ 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45,
+ 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d,
+ 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5,
+ 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d,
+ 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d,
+ 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7,
+ 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8,
+ 0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2,
+ 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58,
+ 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11,
+ 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff,
+ 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02,
+ 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd,
+ 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25,
+ 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9,
+ 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27,
+ 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9,
+ 0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48,
+ 0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00,
+}
diff --git a/agent/vendor/github.com/prometheus/client_model/ruby/LICENSE b/agent/vendor/github.com/prometheus/client_model/ruby/LICENSE
new file mode 100644
index 00000000000..11069edd790
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/client_model/ruby/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/agent/vendor/github.com/prometheus/common/LICENSE b/agent/vendor/github.com/prometheus/common/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/agent/vendor/github.com/prometheus/common/NOTICE b/agent/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 00000000000..636a2c1a5e8
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/agent/vendor/github.com/prometheus/common/expfmt/decode.go b/agent/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 00000000000..c092723e84a
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,429 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return FmtUnknown
+ }
+
+ const textType = "text/plain"
+
+ switch mediatype {
+ case ProtoType:
+ if p, ok := params["proto"]; ok && p != ProtoProtocol {
+ return FmtUnknown
+ }
+ if e, ok := params["encoding"]; ok && e != "delimited" {
+ return FmtUnknown
+ }
+ return FmtProtoDelim
+
+ case textType:
+ if v, ok := params["version"]; ok && v != TextVersion {
+ return FmtUnknown
+ }
+ return FmtText
+ }
+
+ return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+ switch format {
+ case FmtProtoDelim:
+ return &protoDecoder{r: r}
+ }
+ return &textDecoder{r: r}
+}
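+
+// A minimal decoding sketch (illustrative only), assuming an HTTP response
+// 'resp' whose body carries metrics in one of the supported formats:
+//
+//     dec := NewDecoder(resp.Body, ResponseFormat(resp.Header))
+//     for {
+//         var mf dto.MetricFamily
+//         if err := dec.Decode(&mf); err == io.EOF {
+//             break
+//         } else if err != nil {
+//             return err
+//         }
+//         // ... use mf ...
+//     }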
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ _, err := pbutil.ReadDelimited(d.r, v)
+ if err != nil {
+ return err
+ }
+ if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ return fmt.Errorf("invalid metric name %q", v.GetName())
+ }
+ for _, m := range v.GetMetric() {
+ if m == nil {
+ continue
+ }
+ for _, l := range m.GetLabel() {
+ if l == nil {
+ continue
+ }
+ if !model.LabelValue(l.GetValue()).IsValid() {
+ return fmt.Errorf("invalid label value %q", l.GetValue())
+ }
+ if !model.LabelName(l.GetName()).IsValid() {
+ return fmt.Errorf("invalid label name %q", l.GetName())
+ }
+ }
+ }
+ return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ p TextParser
+ fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ // TODO(fabxc): Wrap this as a line reader to make streaming safer.
+ if len(d.fams) == 0 {
+ // No cached metric families, read everything and parse metrics.
+ fams, err := d.p.TextToMetricFamilies(d.r)
+ if err != nil {
+ return err
+ }
+ if len(fams) == 0 {
+ return io.EOF
+ }
+ d.fams = make([]*dto.MetricFamily, 0, len(fams))
+ for _, f := range fams {
+ d.fams = append(d.fams, f)
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ err := sd.Dec.Decode(&sd.f)
+ if err != nil {
+ return err
+ }
+ *s, err = extractSamples(&sd.f, sd.Opts)
+ return err
+}
+
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, it continues to
+// extract from the remaining metric families. The returned error is the last
+// error that has occurred.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+ var (
+ all model.Vector
+ lastErr error
+ )
+ for _, f := range fams {
+ some, err := extractSamples(f, o)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ all = append(all, some...)
+ }
+ return all, lastErr
+}
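+
+// A minimal sketch of a call site, assuming already-decoded families in
+// 'fams' and the current time as the fallback timestamp:
+//
+//     opts := &DecodeOptions{Timestamp: model.Now()}
+//     vec, lastErr := ExtractSamples(opts, fams...)
+//     // vec holds the samples of every family that extracted cleanly,
+//     // even when lastErr is non-nil.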
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
+ switch f.GetType() {
+ case dto.MetricType_COUNTER:
+ return extractCounter(o, f), nil
+ case dto.MetricType_GAUGE:
+ return extractGauge(o, f), nil
+ case dto.MetricType_SUMMARY:
+ return extractSummary(o, f), nil
+ case dto.MetricType_UNTYPED:
+ return extractUntyped(o, f), nil
+ case dto.MetricType_HISTOGRAM:
+ return extractHistogram(o, f), nil
+ }
+ return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Counter == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Counter.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Gauge == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Gauge.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Untyped == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Untyped.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Summary == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ for _, q := range m.Summary.Quantile {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ // BUG(matt): Update other names to "quantile".
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetValue()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Histogram == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ infSeen := false
+
+ for _, q := range m.Histogram.Bucket {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetCumulativeCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ count := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleCount()),
+ Timestamp: timestamp,
+ }
+ samples = append(samples, count)
+
+ if !infSeen {
+ // Append an infinity bucket sample.
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: count.Value,
+ Timestamp: timestamp,
+ })
+ }
+ }
+
+ return samples
+}
diff --git a/agent/vendor/github.com/prometheus/common/expfmt/encode.go b/agent/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 00000000000..11839ed65ce
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+ Encode(*dto.MetricFamily) error
+}
+
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+ return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
+func Negotiate(h http.Header) Format {
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ // Check for protocol buffer
+ if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return FmtProtoDelim
+ case "text":
+ return FmtProtoText
+ case "compact-text":
+ return FmtProtoCompact
+ }
+ }
+ // Check for text format.
+ ver := ac.Params["version"]
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return FmtText
+ }
+ }
+ return FmtText
+}
+
+// NewEncoder returns a new encoder based on the given format, which is
+// typically the result of content type negotiation via Negotiate.
+func NewEncoder(w io.Writer, format Format) Encoder {
+ switch format {
+ case FmtProtoDelim:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := pbutil.WriteDelimited(w, v)
+ return err
+ })
+ case FmtProtoCompact:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, v.String())
+ return err
+ })
+ case FmtProtoText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ return err
+ })
+ case FmtText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToText(w, v)
+ return err
+ })
+ }
+ panic("expfmt.NewEncoder: unknown format")
+}
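+
+// A minimal server-side sketch, assuming an http.ResponseWriter 'w', a
+// request 'req', and metric families 'fams':
+//
+//     format := Negotiate(req.Header)
+//     w.Header().Set("Content-Type", string(format))
+//     enc := NewEncoder(w, format)
+//     for _, mf := range fams {
+//         if err := enc.Encode(mf); err != nil {
+//             break
+//         }
+//     }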
diff --git a/agent/vendor/github.com/prometheus/common/expfmt/expfmt.go b/agent/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 00000000000..c71bcb98167
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,38 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+// Format specifies the HTTP content type of the different wire protocols.
+type Format string
+
+// Constants to assemble the Content-Type values for the different wire protocols.
+const (
+ TextVersion = "0.0.4"
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+ // The Content-Type values for the different wire protocols.
+ FmtUnknown Format = ``
+ FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+)
+
+const (
+ hdrContentType = "Content-Type"
+ hdrAccept = "Accept"
+)
diff --git a/agent/vendor/github.com/prometheus/common/expfmt/fuzz.go b/agent/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 00000000000..dc2eedeefca
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz the text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+ parser := TextParser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/agent/vendor/github.com/prometheus/common/expfmt/text_create.go b/agent/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 00000000000..8e473d0fe92
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,468 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/prometheus/common/model"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer
+// implements it.
+type enhancedWriter interface {
+ io.Writer
+ WriteRune(r rune) (n int, err error)
+ WriteString(s string) (n int, err error)
+ WriteByte(c byte) error
+}
+
+const (
+ initialBufSize = 512
+ initialNumBufSize = 24
+)
+
+var (
+ bufPool = sync.Pool{
+ New: func() interface{} {
+ return bytes.NewBuffer(make([]byte, 0, initialBufSize))
+ },
+ }
+ numBufPool = sync.Pool{
+ New: func() interface{} {
+ b := make([]byte, 0, initialNumBufSize)
+ return &b
+ },
+ }
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output will have the same order as the input;
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return 0, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Try the interface upgrade. If it doesn't work, we'll use a
+ // bytes.Buffer from the sync.Pool and write out its content to out in a
+ // single go in the end.
+ w, ok := out.(enhancedWriter)
+ if !ok {
+ b := bufPool.Get().(*bytes.Buffer)
+ b.Reset()
+ w = b
+ defer func() {
+ bWritten, bErr := out.Write(b.Bytes())
+ written = bWritten
+ if err == nil {
+ err = bErr
+ }
+ bufPool.Put(b)
+ }()
+ }
+
+ var n int
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err = w.WriteString("# HELP ")
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = w.WriteString(name)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return
+ }
+ n, err = writeEscapedString(w, *in.Help, false)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return
+ }
+ }
+ n, err = w.WriteString("# TYPE ")
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = w.WriteString(name)
+ written += n
+ if err != nil {
+ return
+ }
+ metricType := in.GetType()
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ n, err = w.WriteString(" counter\n")
+ case dto.MetricType_GAUGE:
+ n, err = w.WriteString(" gauge\n")
+ case dto.MetricType_SUMMARY:
+ n, err = w.WriteString(" summary\n")
+ case dto.MetricType_UNTYPED:
+ n, err = w.WriteString(" untyped\n")
+ case dto.MetricType_HISTOGRAM:
+ n, err = w.WriteString(" histogram\n")
+ default:
+ return written, fmt.Errorf("unknown metric type %s", metricType.String())
+ }
+ written += n
+ if err != nil {
+ return
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ w, name, "", metric, "", 0,
+ metric.Counter.GetValue(),
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ w, name, "", metric, "", 0,
+ metric.Gauge.GetValue(),
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ w, name, "", metric, "", 0,
+ metric.Untyped.GetValue(),
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ w, name, "", metric,
+ model.QuantileLabel, q.GetQuantile(),
+ q.GetValue(),
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ }
+ n, err = writeSample(
+ w, name, "_sum", metric, "", 0,
+ metric.Summary.GetSampleSum(),
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeSample(
+ w, name, "_count", metric, "", 0,
+ float64(metric.Summary.GetSampleCount()),
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, b := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ w, name, "_bucket", metric,
+ model.BucketLabel, b.GetUpperBound(),
+ float64(b.GetCumulativeCount()),
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ if math.IsInf(b.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeSample(
+ w, name, "_bucket", metric,
+ model.BucketLabel, math.Inf(+1),
+ float64(metric.Histogram.GetSampleCount()),
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ }
+ n, err = writeSample(
+ w, name, "_sum", metric, "", 0,
+ metric.Histogram.GetSampleSum(),
+ )
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeSample(
+ w, name, "_count", metric, "", 0,
+ float64(metric.Histogram.GetSampleCount()),
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return
+ }
+ }
+ return
+}
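+
+// A minimal sketch, assuming a populated *dto.MetricFamily 'mf':
+//
+//     var buf bytes.Buffer
+//     if _, err := MetricFamilyToText(&buf, mf); err != nil {
+//         return err
+//     }
+//     // buf now holds the "# HELP" and "# TYPE" comments followed by
+//     // one line per sample.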
+
+// writeSample writes a single sample in text format to w, given the metric
+// name, an optional metric name suffix, the metric proto message itself,
+// optionally an additional label name with a float64 value (use empty string
+// as label name if not required), and the value. The function returns the
+// number of bytes written and any error
+// encountered.
+func writeSample(
+ w enhancedWriter,
+ name, suffix string,
+ metric *dto.Metric,
+ additionalLabelName string, additionalLabelValue float64,
+ value float64,
+) (int, error) {
+ var written int
+ n, err := w.WriteString(name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if suffix != "" {
+ n, err = w.WriteString(suffix)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeLabelPairs(
+ w, metric.Label, additionalLabelName, additionalLabelValue,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err = writeFloat(w, value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err = writeInt(w, *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// writeLabelPairs converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into text as required by the text
+// format and writes it to 'w'. An empty slice in combination with an empty
+// string 'additionalLabelName' results in nothing being written. Otherwise, the
+// label pairs are written, escaped as required by the text format, and enclosed
+// in '{...}'. The function returns the number of bytes written and any error
+// encountered.
+func writeLabelPairs(
+ w enhancedWriter,
+ in []*dto.LabelPair,
+ additionalLabelName string, additionalLabelValue float64,
+) (int, error) {
+ if len(in) == 0 && additionalLabelName == "" {
+ return 0, nil
+ }
+ var (
+ written int
+ separator byte = '{'
+ )
+ for _, lp := range in {
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err := w.WriteString(lp.GetName())
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = w.WriteString(`="`)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = writeEscapedString(w, lp.GetValue(), true)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err := w.WriteString(additionalLabelName)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = w.WriteString(`="`)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = writeFloat(w, additionalLabelValue)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
+ }
+ err := w.WriteByte('}')
+ written++
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// writeEscapedString replaces '\' by '\\' and the newline character by '\n';
+// if includeDoubleQuote is true, it also replaces '"' by '\"'.
+var (
+ escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`)
+ quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
+)
+
+func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
+ if includeDoubleQuote {
+ return quotedEscaper.WriteString(w, v)
+ } else {
+ return escaper.WriteString(w, v)
+ }
+}
+
+// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
+// a few common cases for increased efficiency. For non-hardcoded cases, it uses
+// strconv.AppendFloat to avoid allocations, similar to writeInt.
+func writeFloat(w enhancedWriter, f float64) (int, error) {
+ switch {
+ case f == 1:
+ return 1, w.WriteByte('1')
+ case f == 0:
+ return 1, w.WriteByte('0')
+ case f == -1:
+ return w.WriteString("-1")
+ case math.IsNaN(f):
+ return w.WriteString("NaN")
+ case math.IsInf(f, +1):
+ return w.WriteString("+Inf")
+ case math.IsInf(f, -1):
+ return w.WriteString("-Inf")
+ default:
+ bp := numBufPool.Get().(*[]byte)
+ *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+ written, err := w.Write(*bp)
+ numBufPool.Put(bp)
+ return written, err
+ }
+}
+
+// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
+// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
+// allocations.
+func writeInt(w enhancedWriter, i int64) (int, error) {
+ bp := numBufPool.Get().(*[]byte)
+ *bp = strconv.AppendInt((*bp)[:0], i, 10)
+ written, err := w.Write(*bp)
+ numBufPool.Put(bp)
+ return written, err
+}
diff --git a/agent/vendor/github.com/prometheus/common/expfmt/text_parse.go b/agent/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 00000000000..ec3d86ba7ce
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,757 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine.
+// Executing it progresses the state machine: the returned stateFn represents
+// the new state. The end state is represented by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int
+ Msg string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use.
+type TextParser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+	// These tell us whether the currently processed line ends in '_count'
+	// or '_sum' respectively and belongs to a summary/histogram, representing
+	// the sample count or sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. The same is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily nor the label
+// pairs within each Metric are sorted. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate Parser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ // If p.err is io.EOF now, we have run into a premature end of the input
+ // stream. Turn this error into something nicer and more
+ // meaningful. (io.EOF is often used as a signal for the legitimate end
+ // of an input stream.)
+ if p.err == io.EOF {
+ p.parseError("unexpected end of input stream")
+ }
+ return p.metricFamiliesByName, p.err
+}
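+
+// A minimal usage sketch, assuming text-format input in the string 'in':
+//
+//     var parser TextParser
+//     fams, err := parser.TextToMetricFamilies(strings.NewReader(in))
+//     if err != nil {
+//         return err
+//     }
+//     for name, mf := range fams {
+//         // ... use name and mf ...
+//     }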
+
+func (p *TextParser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // End of input reached. This is the only case where
+ // that is not an error but a signal that we are done.
+ p.err = nil
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ // Now is the time to fix the type if it hasn't happened yet.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ // Do not append the newly created currentMetric to
+ // currentMF.Metric right now. First wait if this is a summary,
+ // and the metric exists already, which we can only know after
+ // having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ if !model.LabelValue(p.currentToken.String()).IsValid() {
+ p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == model.QuantileLabel {
+ if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == model.BucketLabel {
+ if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+		// Summary components (_count, _sum, and quantile samples) may arrive
+		// in any order, so create the Summary lazily on first use.
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+		// Histogram components likewise may arrive in any order, so create
+		// the Histogram lazily on first use.
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character.
+// All other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+	// Check whether this is a _sum or _count of a summary/histogram.
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
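
The state machine above is driven by the parser's public entry point, TextToMetricFamilies, defined earlier in this file. A minimal usage sketch against the upstream prometheus/common expfmt package (metric names and values are illustrative); the summary input below exercises the quantile handling in startLabelValue:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// A small text-format payload: one summary with a quantile,
	// plus its _sum and _count series.
	input := `# HELP rpc_duration_seconds RPC latency.
# TYPE rpc_duration_seconds summary
rpc_duration_seconds{quantile="0.5"} 0.05
rpc_duration_seconds_sum 17.5
rpc_duration_seconds_count 100
`
	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for name, mf := range families {
		fmt.Printf("%s: type=%s, metrics=%d\n", name, mf.GetType(), len(mf.GetMetric()))
	}
}

Because the _sum, _count, and quantile samples share one label signature, setOrCreateCurrentMF and readingValue fold all three lines above into a single dto.Metric.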
diff --git a/agent/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/agent/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
new file mode 100644
index 00000000000..7723656d58d
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+ Q float32
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/agent/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/agent/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 00000000000..648b38cb654
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+ slice := []Accept(accept)
+ return len(slice)
+}
+
+func (accept accept_slice) Less(i, j int) bool {
+ slice := []Accept(accept)
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+ slice := []Accept(accept)
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+ parts := strings.Split(header, ",")
+ accept = make([]Accept, 0, len(parts))
+ for _, part := range parts {
+ part := strings.Trim(part, " ")
+
+ a := Accept{}
+ a.Params = make(map[string]string)
+ a.Q = 1.0
+
+ mrp := strings.Split(part, ";")
+
+ media_range := mrp[0]
+ sp := strings.Split(media_range, "/")
+ a.Type = strings.Trim(sp[0], " ")
+
+ switch {
+ case len(sp) == 1 && a.Type == "*":
+ a.SubType = "*"
+ case len(sp) == 2:
+ a.SubType = strings.Trim(sp[1], " ")
+ default:
+ continue
+ }
+
+ if len(mrp) == 1 {
+ accept = append(accept, a)
+ continue
+ }
+
+ for _, param := range mrp[1:] {
+ sp := strings.SplitN(param, "=", 2)
+ if len(sp) != 2 {
+ continue
+ }
+ token := strings.Trim(sp[0], " ")
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp[1], 32)
+ } else {
+ a.Params[token] = strings.Trim(sp[1], " ")
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ slice := accept_slice(accept)
+ sort.Sort(slice)
+
+ return
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
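
A short sketch of the two exported functions. Note the import path shown is the original upstream one; inside this repository the package is vendored under an internal/ directory and is not meant to be imported directly:

package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg"
)

func main() {
	header := "text/html;q=0.9, application/json, */*;q=0.1"

	// Negotiate returns the alternative best matching the Accept header.
	// application/json wins here because its implicit q is 1.0.
	fmt.Println(goautoneg.Negotiate(header, []string{"text/plain", "application/json"}))

	// ParseAccept returns the clauses sorted by descending preference.
	for _, c := range goautoneg.ParseAccept(header) {
		fmt.Printf("%s/%s q=%g\n", c.Type, c.SubType, c.Q)
	}
}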
diff --git a/agent/vendor/github.com/prometheus/common/model/alert.go b/agent/vendor/github.com/prometheus/common/model/alert.go
new file mode 100644
index 00000000000..35e739c7ad2
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,136 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
+type AlertStatus string
+
+const (
+ AlertFiring AlertStatus = "firing"
+ AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+	// Label/value pairs used for aggregation, matching, and dispatching.
+	// This must minimally include an "alertname" label.
+ Labels LabelSet `json:"labels"`
+
+ // Extra key/value information which does not define alert identity.
+ Annotations LabelSet `json:"annotations"`
+
+ // The known time range for this alert. Both ends are optional.
+ StartsAt time.Time `json:"startsAt,omitempty"`
+ EndsAt time.Time `json:"endsAt,omitempty"`
+ GeneratorURL string `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+ return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+ return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+ s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+ if a.Resolved() {
+ return s + "[resolved]"
+ }
+ return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+ return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before or at
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+ if a.EndsAt.IsZero() {
+ return false
+ }
+ return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+ if a.Resolved() {
+ return AlertResolved
+ }
+ return AlertFiring
+}
+
+// Validate checks the alert data for consistency, returning an error if it
+// is inconsistent.
+func (a *Alert) Validate() error {
+ if a.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if err := a.Labels.Validate(); err != nil {
+ return fmt.Errorf("invalid label set: %s", err)
+ }
+ if len(a.Labels) == 0 {
+ return fmt.Errorf("at least one label pair required")
+ }
+ if err := a.Annotations.Validate(); err != nil {
+ return fmt.Errorf("invalid annotations: %s", err)
+ }
+ return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+ if as[i].StartsAt.Before(as[j].StartsAt) {
+ return true
+ }
+ if as[i].EndsAt.Before(as[j].EndsAt) {
+ return true
+ }
+ return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+ for _, a := range as {
+ if !a.Resolved() {
+ return true
+ }
+ }
+ return false
+}
+
+// Status returns StatusFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+ if as.HasFiring() {
+ return AlertFiring
+ }
+ return AlertResolved
+}
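
A brief, hypothetical sketch of constructing and validating an alert with this model (label names and values are illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels: model.LabelSet{
			model.AlertNameLabel: "HighErrorRate",
			"severity":           "page",
		},
		Annotations: model.LabelSet{"summary": "error rate above 5% for 10m"},
		StartsAt:    time.Now().Add(-10 * time.Minute),
	}
	if err := a.Validate(); err != nil {
		fmt.Println("invalid alert:", err)
		return
	}
	// EndsAt is zero, so the alert is still firing.
	fmt.Println(a.Status(), a.Name(), a.Fingerprint())
}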
diff --git a/agent/vendor/github.com/prometheus/common/model/fingerprinting.go b/agent/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 00000000000..fc4de4106e8
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+ return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+ return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+ return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for k := range s {
+ if _, ok := o[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+ myLength, otherLength := len(s), len(o)
+ if myLength == 0 || otherLength == 0 {
+ return FingerprintSet{}
+ }
+
+ subSet := s
+ superSet := o
+
+ if otherLength < myLength {
+ subSet = o
+ superSet = s
+ }
+
+ out := FingerprintSet{}
+
+ for k := range subSet {
+ if _, ok := superSet[k]; ok {
+ out[k] = struct{}{}
+ }
+ }
+
+ return out
+}
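
Fingerprints round-trip through their fixed-width hexadecimal string form, as this small sketch shows:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fp := model.Fingerprint(0x1234abcd)
	s := fp.String() // zero-padded to 16 hex digits: "000000001234abcd"

	parsed, err := model.ParseFingerprint(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == fp) // true
}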
diff --git a/agent/vendor/github.com/prometheus/common/model/fnv.go b/agent/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 00000000000..038fc1c9003
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
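
These helpers are unexported, so they can only be exercised from inside package model. A hypothetical in-package test sketch, checking that the inlined variant agrees with the standard library's FNV-1a:

package model

import (
	"hash/fnv"
	"testing"
)

func TestInlineFNVMatchesStdlib(t *testing.T) {
	h := fnv.New64a()
	h.Write([]byte("alertname"))
	want := h.Sum64()

	// hashAdd applies the same xor-then-multiply FNV-1a steps per byte.
	if got := hashAdd(hashNew(), "alertname"); got != want {
		t.Fatalf("got %#x, want %#x", got, want)
	}
}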
diff --git a/agent/vendor/github.com/prometheus/common/model/labels.go b/agent/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 00000000000..41051a01a36
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+	// AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValid() bool {
+ if len(ln) == 0 {
+ return false
+ }
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+ return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+ labelStrings := make([]string, 0, len(l))
+ for _, label := range l {
+ labelStrings = append(labelStrings, string(label))
+ }
+ return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+ return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+ return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+ return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+ Name LabelName
+ Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+ return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+ switch {
+ case l[i].Name > l[j].Name:
+ return false
+ case l[i].Name < l[j].Name:
+ return true
+ case l[i].Value > l[j].Value:
+ return false
+ case l[i].Value < l[j].Value:
+ return true
+ default:
+ return false
+ }
+}
+
+func (l LabelPairs) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
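
A quick sketch of the validation rules above:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.LabelName("http_status").IsValid()) // true
	fmt.Println(model.LabelName("0status").IsValid())     // false: leading digit
	fmt.Println(model.LabelName("status-code").IsValid()) // false: '-' not allowed

	// Label values only need to be valid UTF-8.
	fmt.Println(model.LabelValue("äöü ✓").IsValid()) // true
}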
diff --git a/agent/vendor/github.com/prometheus/common/model/labelset.go b/agent/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 00000000000..6eda08a7395
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. A LabelSet
+// may or may not be fully qualified, i.e., it may or may not resolve to a
+// single Metric in the data store. Operations on a LabelSet can therefore
+// yield a vector of Metric entities that the LabelSet matches.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+ for ln, lv := range ls {
+ if !ln.IsValid() {
+ return fmt.Errorf("invalid name %q", ln)
+ }
+ if !lv.IsValid() {
+ return fmt.Errorf("invalid value %q", lv)
+ }
+ }
+ return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for ln, lv := range ls {
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if olv != lv {
+ return false
+ }
+ }
+ return true
+}
+
+// Before compares two label sets using the following criteria:
+//
+// If ls has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: if the label does not exist at all in ls, then ls
+// is before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If ls and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+ if len(ls) < len(o) {
+ return true
+ }
+ if len(ls) > len(o) {
+ return false
+ }
+
+ lns := make(LabelNames, 0, len(ls)+len(o))
+ for ln := range ls {
+ lns = append(lns, ln)
+ }
+ for ln := range o {
+ lns = append(lns, ln)
+ }
+ // It's probably not worth it to de-dup lns.
+ sort.Sort(lns)
+ for _, ln := range lns {
+ mlv, ok := ls[ln]
+ if !ok {
+ return true
+ }
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if mlv < olv {
+ return true
+ }
+ if mlv > olv {
+ return false
+ }
+ }
+ return false
+}
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet {
+ lsn := make(LabelSet, len(ls))
+ for ln, lv := range ls {
+ lsn[ln] = lv
+ }
+ return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(l))
+
+ for k, v := range l {
+ result[k] = v
+ }
+
+ for k, v := range other {
+ result[k] = v
+ }
+
+ return result
+}
+
+func (l LabelSet) String() string {
+ lstrs := make([]string, 0, len(l))
+ for l, v := range l {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
+ }
+
+ sort.Strings(lstrs)
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+ return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+ return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+ var m map[LabelName]LabelValue
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ // encoding/json only unmarshals maps of the form map[string]T. It treats
+ // LabelName as a string and does not call its UnmarshalJSON method.
+ // Thus, we have to replicate the behavior here.
+ for ln := range m {
+ if !ln.IsValid() {
+ return fmt.Errorf("%q is not a valid label name", ln)
+ }
+ }
+ *l = LabelSet(m)
+ return nil
+}
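
A sketch of the non-destructive Merge and the Before ordering (label names are illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	base := model.LabelSet{"job": "api", "instance": "a:9090"}
	extra := model.LabelSet{"instance": "b:9090", "env": "prod"}

	merged := base.Merge(extra)
	fmt.Println(merged["instance"]) // b:9090 (the argument's values win)
	fmt.Println(base["instance"])   // a:9090 (the receiver is unchanged)

	// A set with fewer labels sorts first.
	fmt.Println(base.Before(merged)) // true (2 labels vs. 3)
}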
diff --git a/agent/vendor/github.com/prometheus/common/model/metric.go b/agent/vendor/github.com/prometheus/common/model/metric.go
new file mode 100644
index 00000000000..f7250909b9f
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,103 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+var (
+ separator = []byte{0}
+ // MetricNameRE is a regular expression matching valid metric
+ // names. Note that the IsValidMetricName function performs the same
+ // check but faster than a match with this regular expression.
+ MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+)
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+ return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+ return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+ clone := make(Metric, len(m))
+ for k, v := range m {
+ clone[k] = v
+ }
+ return clone
+}
+
+func (m Metric) String() string {
+ metricName, hasName := m[MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+ }
+ }
+
+ switch numLabels {
+ case 0:
+ if hasName {
+ return string(metricName)
+ }
+ return "{}"
+ default:
+ sort.Strings(labelStrings)
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+ }
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+ return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+ return LabelSet(m).FastFingerprint()
+}
+
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// This function, however, does not use MetricNameRE for the check but a much
+// faster hardcoded implementation.
+func IsValidMetricName(n LabelValue) bool {
+ if len(n) == 0 {
+ return false
+ }
+ for i, b := range n {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
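
The String method renders the familiar exposition-style notation, for example:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"code":                "200",
		"method":              "get",
	}
	// Labels are sorted; the metric name goes in front.
	fmt.Println(m) // http_requests_total{code="200", method="get"}

	fmt.Println(model.IsValidMetricName("http_requests_total")) // true
	fmt.Println(model.IsValidMetricName("2xx_responses"))       // false: leading digit
}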
diff --git a/agent/vendor/github.com/prometheus/common/model/model.go b/agent/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 00000000000..a7b9691707e
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/agent/vendor/github.com/prometheus/common/model/signature.go b/agent/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 00000000000..8762b13c63d
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+ // cache the signature of an empty label set.
+ emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, labelName)
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, labels[labelName])
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ labelNames := make(LabelNames, 0, len(ls))
+ for labelName := range ls {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(ls[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses
+// a faster, less allocation-heavy hash function that is more susceptible to
+// hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ for labelName, labelValue := range ls {
+ sum := hashNew()
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(labelValue))
+ result ^= sum
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ sum := hashNew()
+ for _, label := range labels {
+ sum = hashAdd(sum, string(label))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[label]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
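
Because label names are sorted before hashing, signatures are independent of map iteration order. A sketch, also showing that excluding and selecting labels agree when they describe the same subset:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	a := map[string]string{"method": "get", "code": "200"}
	b := map[string]string{"code": "200", "method": "get"}
	fmt.Println(model.LabelsToSignature(a) == model.LabelsToSignature(b)) // true

	m := model.Metric{"method": "get", "code": "200"}
	without := model.SignatureWithoutLabels(m, map[model.LabelName]struct{}{"code": {}})
	only := model.SignatureForLabels(m, "method")
	fmt.Println(without == only) // true: both hash just method="get"
}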
diff --git a/agent/vendor/github.com/prometheus/common/model/silence.go b/agent/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 00000000000..bb99889d2cc
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// Matcher describes how the value of a given label is matched.
+type Matcher struct {
+ Name LabelName `json:"name"`
+ Value string `json:"value"`
+ IsRegex bool `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+ type plain Matcher
+ if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+ return err
+ }
+
+ if len(m.Name) == 0 {
+ return fmt.Errorf("label name in matcher must not be empty")
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Validate returns an error unless all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+ if !m.Name.IsValid() {
+ return fmt.Errorf("invalid name %q", m.Name)
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return fmt.Errorf("invalid regular expression %q", m.Value)
+ }
+ } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+ return fmt.Errorf("invalid value %q", m.Value)
+ }
+ return nil
+}
+
+// Silence defines the representation of a silence definition in the Prometheus
+// eco-system.
+type Silence struct {
+ ID uint64 `json:"id,omitempty"`
+
+ Matchers []*Matcher `json:"matchers"`
+
+ StartsAt time.Time `json:"startsAt"`
+ EndsAt time.Time `json:"endsAt"`
+
+ CreatedAt time.Time `json:"createdAt,omitempty"`
+ CreatedBy string `json:"createdBy"`
+ Comment string `json:"comment,omitempty"`
+}
+
+// Validate returns an error unless all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+ if len(s.Matchers) == 0 {
+ return fmt.Errorf("at least one matcher required")
+ }
+ for _, m := range s.Matchers {
+ if err := m.Validate(); err != nil {
+ return fmt.Errorf("invalid matcher: %s", err)
+ }
+ }
+ if s.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if s.EndsAt.IsZero() {
+ return fmt.Errorf("end time missing")
+ }
+ if s.EndsAt.Before(s.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if s.CreatedBy == "" {
+ return fmt.Errorf("creator information missing")
+ }
+ if s.Comment == "" {
+ return fmt.Errorf("comment missing")
+ }
+ if s.CreatedAt.IsZero() {
+ return fmt.Errorf("creation timestamp missing")
+ }
+ return nil
+}
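
A hypothetical sketch of a silence that passes Validate (all field values are illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	now := time.Now()
	s := &model.Silence{
		Matchers: []*model.Matcher{
			{Name: "alertname", Value: "HighErrorRate"},
			{Name: "env", Value: "prod-.*", IsRegex: true},
		},
		StartsAt:  now,
		EndsAt:    now.Add(2 * time.Hour),
		CreatedAt: now,
		CreatedBy: "oncall@example.com",
		Comment:   "deploy in progress",
	}
	if err := s.Validate(); err != nil {
		fmt.Println("invalid silence:", err)
		return
	}
	fmt.Println("silence active until", s.EndsAt)
}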
diff --git a/agent/vendor/github.com/prometheus/common/model/time.go b/agent/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 00000000000..46259b1f109
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,264 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+	// minimumTick is the minimum supported time resolution. This has to be
+	// at most time.Second in order for the code below to work.
+ minimumTick = time.Millisecond
+ // second is the Time duration equivalent to one second.
+ second = int64(time.Second / minimumTick)
+ // The number of nanoseconds per minimum tick.
+ nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+ // Earliest is the earliest Time representable. Handy for
+ // initializing a high watermark.
+ Earliest = Time(math.MinInt64)
+ // Latest is the latest Time representable. Handy for initializing
+ // a low watermark.
+ Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+ Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+ return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+ return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+ return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+ return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+ return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+ return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+ return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+ return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+ return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+ return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+ return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+ return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+ p := strings.Split(string(b), ".")
+ switch len(p) {
+ case 1:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ *t = Time(v * second)
+
+ case 2:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ v *= second
+
+ prec := dotPrecision - len(p[1])
+ if prec < 0 {
+ p[1] = p[1][:dotPrecision]
+ } else if prec > 0 {
+ p[1] = p[1] + strings.Repeat("0", prec)
+ }
+
+ va, err := strconv.ParseInt(p[1], 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *t = Time(v + va)
+
+ default:
+ return fmt.Errorf("invalid time %q", string(b))
+ }
+ return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+// Set implements pflag/flag.Value
+func (d *Duration) Set(s string) error {
+ var err error
+ *d, err = ParseDuration(s)
+ return err
+}
+
+// Type implements pflag.Value
+func (d *Duration) Type() string {
+ return "duration"
+}
+
+var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
+
+// ParseDuration parses a string into a time.Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
+func ParseDuration(durationStr string) (Duration, error) {
+ matches := durationRE.FindStringSubmatch(durationStr)
+ if len(matches) != 3 {
+ return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+ }
+ var (
+ n, _ = strconv.Atoi(matches[1])
+ dur = time.Duration(n) * time.Millisecond
+ )
+ switch unit := matches[2]; unit {
+ case "y":
+ dur *= 1000 * 60 * 60 * 24 * 365
+ case "w":
+ dur *= 1000 * 60 * 60 * 24 * 7
+ case "d":
+ dur *= 1000 * 60 * 60 * 24
+ case "h":
+ dur *= 1000 * 60 * 60
+ case "m":
+ dur *= 1000 * 60
+ case "s":
+ dur *= 1000
+ case "ms":
+ // Value already correct
+ default:
+ return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+ }
+ return Duration(dur), nil
+}
+
+func (d Duration) String() string {
+ var (
+ ms = int64(time.Duration(d) / time.Millisecond)
+ unit = "ms"
+ )
+ if ms == 0 {
+ return "0s"
+ }
+ factors := map[string]int64{
+ "y": 1000 * 60 * 60 * 24 * 365,
+ "w": 1000 * 60 * 60 * 24 * 7,
+ "d": 1000 * 60 * 60 * 24,
+ "h": 1000 * 60 * 60,
+ "m": 1000 * 60,
+ "s": 1000,
+ "ms": 1,
+ }
+
+ switch int64(0) {
+ case ms % factors["y"]:
+ unit = "y"
+ case ms % factors["w"]:
+ unit = "w"
+ case ms % factors["d"]:
+ unit = "d"
+ case ms % factors["h"]:
+ unit = "h"
+ case ms % factors["m"]:
+ unit = "m"
+ case ms % factors["s"]:
+ unit = "s"
+ }
+ return fmt.Sprintf("%v%v", ms/factors[unit], unit)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+ return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
diff --git a/agent/vendor/github.com/prometheus/common/model/value.go b/agent/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 00000000000..c9d8fb1a283
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,416 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+var (
+ // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+ // non-existing sample pair. It is a SamplePair with timestamp Earliest and
+ // value 0.0. Note that the natural zero value of SamplePair has a timestamp
+ // of 0, which is possible to appear in a real SamplePair and thus not
+ // suitable to signal a non-existing SamplePair.
+ ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+ // ZeroSample is the pseudo zero-value of Sample used to signal a
+ // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+ // and metric nil. Note that the natural zero value of Sample has a timestamp
+ // of 0, which is possible to appear in a real Sample and thus not suitable
+ // to signal a non-existing Sample.
+ ZeroSample = Sample{Timestamp: Earliest}
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
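+
+// A SamplePair therefore marshals to a two-element JSON array, for example
+// (illustrative): [1544712380.089,"42"]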
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality is defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+ if s == o {
+ return true
+ }
+
+ if !s.Metric.Equal(o.Metric) {
+ return false
+ }
+ if !s.Timestamp.Equal(o.Timestamp) {
+ return false
+ }
+
+ return s.Value.Equal(o.Value)
+}
+
+func (s Sample) String() string {
+ return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ })
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ s.Metric = v.Metric
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+
+ return nil
+}
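+
+// On the wire a Sample thus looks like (illustrative):
+//
+//	{"metric":{"__name__":"up"},"value":[1544712380.089,"1"]}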
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+ return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+ switch {
+ case s[i].Metric.Before(s[j].Metric):
+ return true
+ case s[j].Metric.Before(s[i].Metric):
+ return false
+ case s[i].Timestamp.Before(s[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+func (s Samples) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, sample := range s {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+ vals := make([]string, len(ss.Values))
+ for i, v := range ss.Values {
+ vals[i] = v.String()
+ }
+ return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return ""
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+ return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+ v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+ return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+ var f string
+ v := [...]interface{}{&s.Timestamp, &f}
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ value, err := strconv.ParseFloat(f, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing sample value: %s", err)
+ }
+ s.Value = SampleValue(value)
+ return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+ Value string `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s *String) String() string {
+ return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+ return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+ v := [...]interface{}{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Vector is an alias for Samples with the additional contract that in a
+// Vector, all Samples have the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+ entries := make([]string, len(vec))
+ for i, s := range vec {
+ entries[i] = s.String()
+ }
+ return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+ switch {
+ case vec[i].Metric.Before(vec[j].Metric):
+ return true
+ case vec[j].Metric.Before(vec[i].Metric):
+ return false
+ case vec[i].Timestamp.Before(vec[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+ if len(vec) != len(o) {
+ return false
+ }
+
+ for i, sample := range vec {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+func (mat Matrix) String() string {
+ matCp := make(Matrix, len(mat))
+ copy(matCp, mat)
+ sort.Sort(matCp)
+
+ strs := make([]string, len(matCp))
+
+ for i, ss := range matCp {
+ strs[i] = ss.String()
+ }
+
+ return strings.Join(strs, "\n")
+}
diff --git a/agent/vendor/github.com/prometheus/procfs/.gitignore b/agent/vendor/github.com/prometheus/procfs/.gitignore
new file mode 100644
index 00000000000..25e3659ab25
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/.gitignore
@@ -0,0 +1 @@
+/fixtures/
diff --git a/agent/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/agent/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 00000000000..40503edbf18
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull request,
+ addressing (with `@...`) the maintainer of this repository (see
+ [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/agent/vendor/github.com/prometheus/procfs/LICENSE b/agent/vendor/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/agent/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/agent/vendor/github.com/prometheus/procfs/MAINTAINERS.md
new file mode 100644
index 00000000000..35993c41c27
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/MAINTAINERS.md
@@ -0,0 +1 @@
+* Tobias Schmidt
diff --git a/agent/vendor/github.com/prometheus/procfs/Makefile b/agent/vendor/github.com/prometheus/procfs/Makefile
new file mode 100644
index 00000000000..4d109839466
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/Makefile
@@ -0,0 +1,77 @@
+# Copyright 2018 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Ensure GOBIN is not set during build so that promu is installed to the correct path
+unexport GOBIN
+
+GO ?= go
+GOFMT ?= $(GO)fmt
+FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
+STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
+pkgs = $(shell $(GO) list ./... | grep -v /vendor/)
+
+PREFIX ?= $(shell pwd)
+BIN_DIR ?= $(shell pwd)
+
+ifdef DEBUG
+ bindata_flags = -debug
+endif
+
+STATICCHECK_IGNORE =
+
+all: format staticcheck build test
+
+style:
+ @echo ">> checking code style"
+ @! $(GOFMT) -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^'
+
+check_license:
+ @echo ">> checking license header"
+ @./scripts/check_license.sh
+
+test: fixtures/.unpacked sysfs/fixtures/.unpacked
+ @echo ">> running all tests"
+ @$(GO) test -race $(shell $(GO) list ./... | grep -v /vendor/ | grep -v examples)
+
+format:
+ @echo ">> formatting code"
+ @$(GO) fmt $(pkgs)
+
+vet:
+ @echo ">> vetting code"
+ @$(GO) vet $(pkgs)
+
+staticcheck: $(STATICCHECK)
+ @echo ">> running staticcheck"
+ @$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
+
+%/.unpacked: %.ttar
+ ./ttar -C $(dir $*) -x -f $*.ttar
+ touch $@
+
+update_fixtures: fixtures.ttar sysfs/fixtures.ttar
+
+%fixtures.ttar: %/fixtures
+ rm -v $(dir $*)fixtures/.unpacked
+ ./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/
+
+$(FIRST_GOPATH)/bin/staticcheck:
+ @GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck
+
+.PHONY: all style check_license format test vet staticcheck
+
+# Declaring the binaries at their default locations as PHONY targets is a hack
+# to ensure the latest version is downloaded on every make execution.
+# If this is not desired, copy/symlink these binaries to a different path and
+# set the respective environment variables.
+.PHONY: $(GOPATH)/bin/staticcheck
diff --git a/agent/vendor/github.com/prometheus/procfs/NOTICE b/agent/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 00000000000..53c5e9aa111
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/agent/vendor/github.com/prometheus/procfs/README.md b/agent/vendor/github.com/prometheus/procfs/README.md
new file mode 100644
index 00000000000..2095494719b
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/README.md
@@ -0,0 +1,11 @@
+# procfs
+
+This procfs package provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+*WARNING*: This package is a work in progress. Its API may still break in
+backwards-incompatible ways without warnings. Use it at your own risk.
+
+[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
+[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
+[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
diff --git a/agent/vendor/github.com/prometheus/procfs/buddyinfo.go b/agent/vendor/github.com/prometheus/procfs/buddyinfo.go
new file mode 100644
index 00000000000..d3a8268078c
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -0,0 +1,95 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// A BuddyInfo holds the details parsed from /proc/buddyinfo.
+// The data consists of an array of free-fragment counts, one per fragment size.
+// The sizes are 2^n*PAGE_SIZE, where n is the array index.
+type BuddyInfo struct {
+ Node string
+ Zone string
+ Sizes []float64
+}
+
+// NewBuddyInfo reads the buddyinfo statistics.
+func NewBuddyInfo() ([]BuddyInfo, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return nil, err
+ }
+
+ return fs.NewBuddyInfo()
+}
+
+// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
+ file, err := os.Open(fs.Path("buddyinfo"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseBuddyInfo(file)
+}
+
+func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
+ var (
+ buddyInfo = []BuddyInfo{}
+ scanner = bufio.NewScanner(r)
+ bucketCount = -1
+ )
+
+ for scanner.Scan() {
+ var err error
+ line := scanner.Text()
+ parts := strings.Fields(line)
+
+ if len(parts) < 4 {
+ return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
+ }
+
+ node := strings.TrimRight(parts[1], ",")
+ zone := strings.TrimRight(parts[3], ",")
+ arraySize := len(parts[4:])
+
+ if bucketCount == -1 {
+ bucketCount = arraySize
+ } else {
+ if bucketCount != arraySize {
+ return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
+ }
+ }
+
+ sizes := make([]float64, arraySize)
+ for i := 0; i < arraySize; i++ {
+ sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
+ }
+ }
+
+ buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
+ }
+
+ return buddyInfo, scanner.Err()
+}
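+
+// For example (illustrative), the line
+//
+//	Node 0, zone      DMA      1      0      1      0      2
+//
+// parses to BuddyInfo{Node: "0", Zone: "DMA", Sizes: []float64{1, 0, 1, 0, 2}}.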
diff --git a/agent/vendor/github.com/prometheus/procfs/doc.go b/agent/vendor/github.com/prometheus/procfs/doc.go
new file mode 100644
index 00000000000..e2acd6d40a6
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.NewStat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
+//
+package procfs
diff --git a/agent/vendor/github.com/prometheus/procfs/fixtures.ttar b/agent/vendor/github.com/prometheus/procfs/fixtures.ttar
new file mode 100644
index 00000000000..13c831ef599
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/fixtures.ttar
@@ -0,0 +1,462 @@
+# Archive created by ttar -c -f fixtures.ttar fixtures/
+Directory: fixtures
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26231
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/cmdline
+Lines: 1
+vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/comm
+Lines: 1
+vim
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/cwd
+SymlinkTo: /usr/bin
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/exe
+SymlinkTo: /usr/bin/vim
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26231/fd
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/fd/0
+SymlinkTo: ../../symlinktargets/abc
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/fd/1
+SymlinkTo: ../../symlinktargets/def
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/fd/10
+SymlinkTo: ../../symlinktargets/xyz
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/fd/2
+SymlinkTo: ../../symlinktargets/ghi
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/fd/3
+SymlinkTo: ../../symlinktargets/uvw
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/io
+Lines: 7
+rchar: 750339
+wchar: 818609
+syscr: 7405
+syscw: 5245
+read_bytes: 1024
+write_bytes: 2048
+cancelled_write_bytes: -1024
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/limits
+Lines: 17
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size 0 unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 62898 62898 processes
+Max open files 2048 4096 files
+Max locked memory 65536 65536 bytes
+Max address space 8589934592 unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 62898 62898 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/mountstats
+Lines: 19
+device rootfs mounted on / with fstype rootfs
+device sysfs mounted on /sys with fstype sysfs
+device proc mounted on /proc with fstype proc
+device /dev/sda1 mounted on / with fstype ext4
+device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1
+ opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none
+ age: 13968
+ caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255
+ nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured
+ sec: flavor=1,pseudoflavor=1
+ events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0
+ bytes: 1207640230 0 0 0 1210214218 0 295483 0
+ RPC iostats version: 1.0 p/v: 100003/4 (nfs)
+ xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726
+ per-op statistics
+ NULL: 0 0 0 0 0 0 0 0
+ READ: 1298 1298 0 207680 1210292152 6 79386 79407
+ WRITE: 0 0 0 0 0 0 0 0
+
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26231/net
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/net/dev
+Lines: 4
+Inter-| Receive | Transmit
+ face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
+ lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26231/ns
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/ns/mnt
+SymlinkTo: mnt:[4026531840]
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/ns/net
+SymlinkTo: net:[4026531993]
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/root
+SymlinkTo: /
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/stat
+Lines: 1
+26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26232
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/cmdline
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/comm
+Lines: 1
+ata_sff
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/cwd
+SymlinkTo: /does/not/exist
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26232/fd
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/fd/0
+SymlinkTo: ../../symlinktargets/abc
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/fd/1
+SymlinkTo: ../../symlinktargets/def
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/fd/2
+SymlinkTo: ../../symlinktargets/ghi
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/fd/3
+SymlinkTo: ../../symlinktargets/uvw
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/fd/4
+SymlinkTo: ../../symlinktargets/xyz
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/limits
+Lines: 17
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size 0 unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 29436 29436 processes
+Max open files 1024 4096 files
+Max locked memory 65536 65536 bytes
+Max address space unlimited unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 29436 29436 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/root
+SymlinkTo: /does/not/exist
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/stat
+Lines: 1
+33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26233
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26233/cmdline
+Lines: 1
+com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/584
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/584/stat
+Lines: 2
+1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
+#!/bin/cat /proc/self/stat
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/buddyinfo
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/buddyinfo/short
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/buddyinfo/short/buddyinfo
+Lines: 3
+Node 0, zone
+Node 0, zone
+Node 0, zone
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/buddyinfo/sizemismatch
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/buddyinfo/sizemismatch/buddyinfo
+Lines: 3
+Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
+Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0
+Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/buddyinfo/valid
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/buddyinfo/valid/buddyinfo
+Lines: 3
+Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
+Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0
+Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/fs
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/fs/xfs
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/fs/xfs/stat
+Lines: 23
+extent_alloc 92447 97589 92448 93751
+abt 0 0 0 0
+blk_map 1767055 188820 184891 92447 92448 2140766 0
+bmbt 0 0 0 0
+dir 185039 92447 92444 136422
+trans 706 944304 0
+ig 185045 58807 0 126238 0 33637 22
+log 2883 113448 9 17360 739
+push_ail 945014 0 134260 15483 0 3940 464 159985 0 40
+xstrat 92447 0
+rw 107739 94045
+attr 4 0 0 0
+icluster 8677 7849 135802
+vnodes 92601 0 0 0 92444 92444 92444 0
+buf 2666287 7122 2659202 3599 2 7085 0 10297 7085
+abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147
+abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023
+bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0
+fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+qm 0 0 0 0 0 0 0 0
+xpc 399724544 92823103 86219234
+debug 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/mdstat
+Lines: 26
+Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
+md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9]
+ 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
+
+md127 : active raid1 sdi2[0] sdj2[1]
+ 312319552 blocks [2/2] [UU]
+
+md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1]
+ 248896 blocks [2/2] [UU]
+
+md4 : inactive raid1 sda3[0] sdb3[1]
+ 4883648 blocks [2/2] [UU]
+
+md6 : active raid1 sdb2[2] sda2[0]
+ 195310144 blocks [2/1] [U_]
+ [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
+
+md8 : active raid1 sdb1[1] sda1[0]
+ 195310144 blocks [2/2] [UU]
+ [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
+
+md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1]
+ 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU]
+ bitmap: 0/30 pages [0KB], 65536KB chunk
+
+unused devices:
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/net
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/dev
+Lines: 6
+Inter-| Receive | Transmit
+ face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
+vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0
+ lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0
+docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0
+ eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/ip_vs
+Lines: 21
+IP Virtual Server version 1.2.1 (size=4096)
+Prot LocalAddress:Port Scheduler Flags
+ -> RemoteAddress:Port Forward Weight ActiveConn InActConn
+TCP C0A80016:0CEA wlc
+ -> C0A85216:0CEA Tunnel 100 248 2
+ -> C0A85318:0CEA Tunnel 100 248 2
+ -> C0A85315:0CEA Tunnel 100 248 1
+TCP C0A80039:0CEA wlc
+ -> C0A85416:0CEA Tunnel 0 0 0
+ -> C0A85215:0CEA Tunnel 100 1499 0
+ -> C0A83215:0CEA Tunnel 100 1498 0
+TCP C0A80037:0CEA wlc
+ -> C0A8321A:0CEA Tunnel 0 0 0
+ -> C0A83120:0CEA Tunnel 100 0 0
+TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh
+ -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0
+ -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0
+ -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1
+FWM 10001000 wlc
+ -> C0A8321A:0CEA Route 0 0 1
+ -> C0A83215:0CEA Route 0 0 2
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/ip_vs_stats
+Lines: 6
+ Total Incoming Outgoing Incoming Outgoing
+ Conns Packets Packets Bytes Bytes
+ 16AA370 E33656E5 0 51D8C8883AB3 0
+
+ Conns/s Pkts/s Pkts/s Bytes/s Bytes/s
+ 4 1FB3C 0 1282A8F 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/net/rpc
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/rpc/nfs
+Lines: 5
+net 18628 0 18628 6
+rpc 4329785 0 4338291
+proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
+proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39
+proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/rpc/nfsd
+Lines: 11
+rc 0 6 18622
+fh 0 0 0 0 0
+io 157286400 0
+th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
+ra 32 0 0 0 0 0 0 0 0 0 0 0
+net 18628 0 18628 6
+rpc 18628 0 0 0 0
+proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
+proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0
+proc4 2 2 10853
+proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/xfrm_stat
+Lines: 28
+XfrmInError 1
+XfrmInBufferError 2
+XfrmInHdrError 4
+XfrmInNoStates 3
+XfrmInStateProtoError 40
+XfrmInStateModeError 100
+XfrmInStateSeqError 6000
+XfrmInStateExpired 4
+XfrmInStateMismatch 23451
+XfrmInStateInvalid 55555
+XfrmInTmplMismatch 51
+XfrmInNoPols 65432
+XfrmInPolBlock 100
+XfrmInPolError 10000
+XfrmOutError 1000000
+XfrmOutBundleGenError 43321
+XfrmOutBundleCheckError 555
+XfrmOutNoStates 869
+XfrmOutStateProtoError 4542
+XfrmOutStateModeError 4
+XfrmOutStateSeqError 543
+XfrmOutStateExpired 565
+XfrmOutPolBlock 43456
+XfrmOutPolDead 7656
+XfrmOutPolError 1454
+XfrmFwdHdrError 6654
+XfrmOutStateInvalid 28765
+XfrmAcquireError 24532
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/self
+SymlinkTo: 26231
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/stat
+Lines: 16
+cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
+cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
+cpu1 47869 23 16474 1110787 591 0 46 0 0 0
+cpu2 46504 36 15916 1112321 441 0 326 0 0 0
+cpu3 47054 102 15683 1113230 533 0 60 0 0 0
+cpu4 28413 25 10776 1140321 217 0 8 0 0 0
+cpu5 29271 101 11586 1136270 672 0 30 0 0 0
+cpu6 29152 36 10276 1139721 319 0 29 0 0 0
+cpu7 29098 268 10164 1139282 555 0 31 0 0 0
+intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ctxt 38014093
+btime 1418183276
+processes 26442
+procs_running 2
+procs_blocked 1
+softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/symlinktargets
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/README
+Lines: 2
+This directory contains some empty files that are the symlinks the files in the "fd" directory point to.
+They are otherwise ignored by the tests
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/abc
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/def
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/ghi
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/uvw
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/xyz
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/.unpacked
+Lines: 0
+Mode: 664
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
diff --git a/agent/vendor/github.com/prometheus/procfs/fs.go b/agent/vendor/github.com/prometheus/procfs/fs.go
new file mode 100644
index 00000000000..b6c6b2ce1f0
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,82 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "path"
+
+ "github.com/prometheus/procfs/nfs"
+ "github.com/prometheus/procfs/xfs"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS string
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = "/proc"
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+ info, err := os.Stat(mountPoint)
+ if err != nil {
+ return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ }
+ if !info.IsDir() {
+ return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ }
+
+ return FS(mountPoint), nil
+}
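+
+// Typical use (illustrative sketch):
+//
+//	fs, err := NewFS(DefaultMountPoint)
+//	if err != nil {
+//		// handle error
+//	}
+//	buddyInfo, err := fs.NewBuddyInfo()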
+
+// Path joins the given path elements onto the procfs mount point and returns
+// the resulting path.
+func (fs FS) Path(p ...string) string {
+ return path.Join(append([]string{string(fs)}, p...)...)
+}
+
+// XFSStats retrieves XFS filesystem runtime statistics.
+func (fs FS) XFSStats() (*xfs.Stats, error) {
+ f, err := os.Open(fs.Path("fs/xfs/stat"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return xfs.ParseStats(f)
+}
+
+// NFSClientRPCStats retrieves NFS client RPC statistics.
+func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) {
+ f, err := os.Open(fs.Path("net/rpc/nfs"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return nfs.ParseClientRPCStats(f)
+}
+
+// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
+func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
+ f, err := os.Open(fs.Path("net/rpc/nfsd"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return nfs.ParseServerRPCStats(f)
+}
diff --git a/agent/vendor/github.com/prometheus/procfs/internal/util/parse.go b/agent/vendor/github.com/prometheus/procfs/internal/util/parse.go
new file mode 100644
index 00000000000..2ff228e9d1f
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "io/ioutil"
+ "strconv"
+ "strings"
+)
+
+// ParseUint32s parses a slice of strings into a slice of uint32s.
+func ParseUint32s(ss []string) ([]uint32, error) {
+ us := make([]uint32, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, uint32(u))
+ }
+
+ return us, nil
+}
+
+// ParseUint64s parses a slice of strings into a slice of uint64s.
+func ParseUint64s(ss []string) ([]uint64, error) {
+ us := make([]uint64, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, u)
+ }
+
+ return us, nil
+}
+
+// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
+func ReadUintFromFile(path string) (uint64, error) {
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
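+
+// Illustrative usage (hypothetical values and path; not part of the
+// upstream file):
+//
+//	ns, err := util.ParseUint64s([]string{"16", "32", "64"})
+//	// ns == []uint64{16, 32, 64}
+//
+//	mtu, err := util.ReadUintFromFile("/sys/class/net/eth0/mtu")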
diff --git a/agent/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go b/agent/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go
new file mode 100644
index 00000000000..df0d567b780
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go
@@ -0,0 +1,45 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package util
+
+import (
+ "bytes"
+ "os"
+ "syscall"
+)
+
+// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
+// https://github.com/prometheus/node_exporter/pull/728/files
+func SysReadFile(file string) (string, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ // On some machines, hwmon drivers are broken and return EAGAIN. This causes
+ // Go's ioutil.ReadFile implementation to poll forever.
+ //
+ // Since we either want to read data or bail immediately, do the simplest
+ // possible read using syscall directly.
+ b := make([]byte, 128)
+ n, err := syscall.Read(int(f.Fd()), b)
+ if err != nil {
+ return "", err
+ }
+
+ return string(bytes.TrimSpace(b[:n])), nil
+}
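+
+// Illustrative usage (assumed sysfs path; not part of the upstream file):
+//
+//	// A single read(2) of at most 128 bytes, so a broken hwmon file that
+//	// returns EAGAIN fails fast instead of blocking forever.
+//	value, err := util.SysReadFile("/sys/class/hwmon/hwmon0/temp1_input")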
diff --git a/agent/vendor/github.com/prometheus/procfs/ipvs.go b/agent/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 00000000000..e36d4a3bd08
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,259 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+ // Total count of connections.
+ Connections uint64
+	// Total incoming packets processed.
+	IncomingPackets uint64
+	// Total outgoing packets processed.
+	OutgoingPackets uint64
+ // Total incoming traffic.
+ IncomingBytes uint64
+ // Total outgoing traffic.
+ OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+ // The local (virtual) IP address.
+ LocalAddress net.IP
+ // The remote (real) IP address.
+ RemoteAddress net.IP
+ // The local (virtual) port.
+ LocalPort uint16
+ // The remote (real) port.
+ RemotePort uint16
+ // The local firewall mark
+ LocalMark string
+ // The transport protocol (TCP, UDP).
+ Proto string
+ // The current number of active connections for this virtual/real address pair.
+ ActiveConn uint64
+ // The current number of inactive connections for this virtual/real address pair.
+ InactConn uint64
+ // The current weight of this virtual/real address pair.
+ Weight uint64
+}
+
+// NewIPVSStats reads the IPVS statistics.
+func NewIPVSStats() (IPVSStats, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return fs.NewIPVSStats()
+}
+
+// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) NewIPVSStats() (IPVSStats, error) {
+ file, err := os.Open(fs.Path("net/ip_vs_stats"))
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ defer file.Close()
+
+ return parseIPVSStats(file)
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
+func parseIPVSStats(file io.Reader) (IPVSStats, error) {
+ var (
+ statContent []byte
+ statLines []string
+ statFields []string
+ stats IPVSStats
+ )
+
+ statContent, err := ioutil.ReadAll(file)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ statLines = strings.SplitN(string(statContent), "\n", 4)
+ if len(statLines) != 4 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
+ }
+
+ statFields = strings.Fields(statLines[2])
+ if len(statFields) != 5 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
+ }
+
+ stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return stats, nil
+}
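+
+// For reference, /proc/net/ip_vs_stats has roughly this layout (values are
+// hexadecimal, which is why ParseUint is called with base 16; the sample
+// below is illustrative):
+//
+//	   Total Incoming Outgoing         Incoming         Outgoing
+//	   Conns  Packets  Packets            Bytes            Bytes
+//	 16AA370 E33656E5        0     51D8C8883AB3                0
+//
+// Only the third line (index 2 after splitting on newlines) is parsed.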
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
+func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return []IPVSBackendStatus{}, err
+ }
+
+ return fs.NewIPVSBackendStatus()
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ file, err := os.Open(fs.Path("net/ip_vs"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseIPVSBackendStatus(file)
+}
+
+func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
+ var (
+ status []IPVSBackendStatus
+ scanner = bufio.NewScanner(file)
+ proto string
+ localMark string
+ localAddress net.IP
+ localPort uint16
+ err error
+ )
+
+ for scanner.Scan() {
+ fields := strings.Fields(scanner.Text())
+ if len(fields) == 0 {
+ continue
+ }
+ switch {
+		case fields[0] == "IP" || fields[0] == "Prot" || (len(fields) > 1 && fields[1] == "RemoteAddress:Port"):
+ continue
+ case fields[0] == "TCP" || fields[0] == "UDP":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localMark = ""
+ localAddress, localPort, err = parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ case fields[0] == "FWM":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localMark = fields[1]
+ localAddress = nil
+ localPort = 0
+ case fields[0] == "->":
+ if len(fields) < 6 {
+ continue
+ }
+ remoteAddress, remotePort, err := parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ weight, err := strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ activeConn, err := strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ inactConn, err := strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ status = append(status, IPVSBackendStatus{
+ LocalAddress: localAddress,
+ LocalPort: localPort,
+ LocalMark: localMark,
+ RemoteAddress: remoteAddress,
+ RemotePort: remotePort,
+ Proto: proto,
+ Weight: weight,
+ ActiveConn: activeConn,
+ InactConn: inactConn,
+ })
+ }
+ }
+ return status, nil
+}
+
+func parseIPPort(s string) (net.IP, uint16, error) {
+ var (
+ ip net.IP
+ err error
+ )
+
+ switch len(s) {
+ case 13:
+ ip, err = hex.DecodeString(s[0:8])
+ if err != nil {
+ return nil, 0, err
+ }
+ case 46:
+ ip = net.ParseIP(s[1:40])
+ if ip == nil {
+ return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
+ }
+ default:
+ return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
+ }
+
+ portString := s[len(s)-4:]
+ if len(portString) != 4 {
+ return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
+ }
+ port, err := strconv.ParseUint(portString, 16, 16)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return ip, uint16(port), nil
+}
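+
+// Worked example (illustrative): for an IPv4 pair the kernel encodes
+// "C0A80001:0050", i.e. 13 characters. hex.DecodeString("C0A80001") yields
+// the 4-byte address 192.168.0.1, and ParseUint("0050", 16, 16) yields
+// port 80. IPv6 entries are 46 characters, e.g.
+// "[0000:0000:0000:0000:0000:0000:0000:0001]:0050".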
diff --git a/agent/vendor/github.com/prometheus/procfs/mdstat.go b/agent/vendor/github.com/prometheus/procfs/mdstat.go
new file mode 100644
index 00000000000..9dc19583d8d
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/mdstat.go
@@ -0,0 +1,151 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
+ buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+)
+
+// MDStat holds info parsed from /proc/mdstat.
+type MDStat struct {
+ // Name of the device.
+ Name string
+	// Activity state of the device.
+ ActivityState string
+ // Number of active disks.
+ DisksActive int64
+ // Total number of disks the device consists of.
+ DisksTotal int64
+ // Number of blocks the device holds.
+ BlocksTotal int64
+ // Number of blocks on the device that are in sync.
+ BlocksSynced int64
+}
+
+// ParseMDStat parses an mdstat file and returns a slice of MDStat structs with the relevant information.
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
+ mdStatusFilePath := fs.Path("mdstat")
+ content, err := ioutil.ReadFile(mdStatusFilePath)
+ if err != nil {
+ return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+ mdStates := []MDStat{}
+ lines := strings.Split(string(content), "\n")
+ for i, l := range lines {
+ if l == "" {
+ continue
+ }
+ if l[0] == ' ' {
+ continue
+ }
+ if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+ continue
+ }
+
+ mainLine := strings.Split(l, " ")
+ if len(mainLine) < 3 {
+ return mdStates, fmt.Errorf("error parsing mdline: %s", l)
+ }
+ mdName := mainLine[0]
+ activityState := mainLine[2]
+
+ if len(lines) <= i+3 {
+ return mdStates, fmt.Errorf(
+ "error parsing %s: too few lines for md device %s",
+ mdStatusFilePath,
+ mdName,
+ )
+ }
+
+ active, total, size, err := evalStatusline(lines[i+1])
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+ // j is the line number of the syncing-line.
+ j := i + 2
+ if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
+ j = i + 3
+ }
+
+		// If the device is syncing at the moment, get the number of currently
+		// synced blocks; otherwise that number equals the size of the device.
+ syncedBlocks := size
+ if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
+ syncedBlocks, err = evalBuildline(lines[j])
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+ }
+
+ mdStates = append(mdStates, MDStat{
+ Name: mdName,
+ ActivityState: activityState,
+ DisksActive: active,
+ DisksTotal: total,
+ BlocksTotal: size,
+ BlocksSynced: syncedBlocks,
+ })
+ }
+
+ return mdStates, nil
+}
+
+func evalStatusline(statusline string) (active, total, size int64, err error) {
+ matches := statuslineRE.FindStringSubmatch(statusline)
+ if len(matches) != 4 {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
+ }
+
+ size, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ total, err = strconv.ParseInt(matches[2], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ active, err = strconv.ParseInt(matches[3], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ return active, total, size, nil
+}
+
+func evalBuildline(buildline string) (syncedBlocks int64, err error) {
+ matches := buildlineRE.FindStringSubmatch(buildline)
+ if len(matches) != 2 {
+ return 0, fmt.Errorf("unexpected buildline: %s", buildline)
+ }
+
+ syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
+ }
+
+ return syncedBlocks, nil
+}
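+
+// For reference, a simplified, illustrative /proc/mdstat excerpt that these
+// helpers can parse:
+//
+//	md0 : active raid1 sdb1[1] sda1[0]
+//	      244198464 blocks [2/2] [UU]
+//	      [=>...................]  resync =  8.5% (20886080/244198464)
+//
+// evalStatusline extracts size=244198464, total=2, active=2 from the second
+// line; evalBuildline extracts syncedBlocks=20886080 from the third.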
diff --git a/agent/vendor/github.com/prometheus/procfs/mountstats.go b/agent/vendor/github.com/prometheus/procfs/mountstats.go
new file mode 100644
index 00000000000..7a8a1e09901
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/mountstats.go
@@ -0,0 +1,606 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// While implementing parsing of /proc/[pid]/mountstats, this blog was used
+// heavily as a reference:
+// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
+//
+// Special thanks to Chris Siebenmann for all of his posts explaining the
+// various statistics available for NFS.
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Constants shared between multiple functions.
+const (
+ deviceEntryLen = 8
+
+ fieldBytesLen = 8
+ fieldEventsLen = 27
+
+ statVersion10 = "1.0"
+ statVersion11 = "1.1"
+
+ fieldTransport10TCPLen = 10
+ fieldTransport10UDPLen = 7
+
+ fieldTransport11TCPLen = 13
+ fieldTransport11UDPLen = 10
+)
+
+// A Mount is a device mount parsed from /proc/[pid]/mountstats.
+type Mount struct {
+ // Name of the device.
+ Device string
+ // The mount point of the device.
+ Mount string
+ // The filesystem type used by the device.
+ Type string
+	// If available, additional statistics related to this Mount.
+ // Use a type assertion to determine if additional statistics are available.
+ Stats MountStats
+}
+
+// A MountStats is a type which contains detailed statistics for a specific
+// type of Mount.
+type MountStats interface {
+ mountStats()
+}
+
+// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
+type MountStatsNFS struct {
+ // The version of statistics provided.
+ StatVersion string
+ // The age of the NFS mount.
+ Age time.Duration
+ // Statistics related to byte counters for various operations.
+ Bytes NFSBytesStats
+ // Statistics related to various NFS event occurrences.
+ Events NFSEventsStats
+ // Statistics broken down by filesystem operation.
+ Operations []NFSOperationStats
+ // Statistics about the NFS RPC transport.
+ Transport NFSTransportStats
+}
+
+// mountStats implements MountStats.
+func (m MountStatsNFS) mountStats() {}
+
+// A NFSBytesStats contains statistics about the number of bytes read and written
+// by an NFS client to and from an NFS server.
+type NFSBytesStats struct {
+ // Number of bytes read using the read() syscall.
+ Read uint64
+ // Number of bytes written using the write() syscall.
+ Write uint64
+ // Number of bytes read using the read() syscall in O_DIRECT mode.
+ DirectRead uint64
+ // Number of bytes written using the write() syscall in O_DIRECT mode.
+ DirectWrite uint64
+ // Number of bytes read from the NFS server, in total.
+ ReadTotal uint64
+ // Number of bytes written to the NFS server, in total.
+ WriteTotal uint64
+ // Number of pages read directly via mmap()'d files.
+ ReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ WritePages uint64
+}
+
+// A NFSEventsStats contains statistics about NFS event occurrences.
+type NFSEventsStats struct {
+ // Number of times cached inode attributes are re-validated from the server.
+ InodeRevalidate uint64
+ // Number of times cached dentry nodes are re-validated from the server.
+ DnodeRevalidate uint64
+ // Number of times an inode cache is cleared.
+ DataInvalidate uint64
+ // Number of times cached inode attributes are invalidated.
+ AttributeInvalidate uint64
+ // Number of times files or directories have been open()'d.
+ VFSOpen uint64
+ // Number of times a directory lookup has occurred.
+ VFSLookup uint64
+ // Number of times permissions have been checked.
+ VFSAccess uint64
+ // Number of updates (and potential writes) to pages.
+ VFSUpdatePage uint64
+ // Number of pages read directly via mmap()'d files.
+ VFSReadPage uint64
+ // Number of times a group of pages have been read.
+ VFSReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ VFSWritePage uint64
+ // Number of times a group of pages have been written.
+ VFSWritePages uint64
+ // Number of times directory entries have been read with getdents().
+ VFSGetdents uint64
+ // Number of times attributes have been set on inodes.
+ VFSSetattr uint64
+ // Number of pending writes that have been forcefully flushed to the server.
+ VFSFlush uint64
+ // Number of times fsync() has been called on directories and files.
+ VFSFsync uint64
+ // Number of times locking has been attempted on a file.
+ VFSLock uint64
+ // Number of times files have been closed and released.
+ VFSFileRelease uint64
+ // Unknown. Possibly unused.
+ CongestionWait uint64
+ // Number of times files have been truncated.
+ Truncation uint64
+ // Number of times a file has been grown due to writes beyond its existing end.
+ WriteExtension uint64
+ // Number of times a file was removed while still open by another process.
+ SillyRename uint64
+ // Number of times the NFS server gave less data than expected while reading.
+ ShortRead uint64
+ // Number of times the NFS server wrote less data than expected while writing.
+ ShortWrite uint64
+ // Number of times the NFS server indicated EJUKEBOX; retrieving data from
+ // offline storage.
+ JukeboxDelay uint64
+ // Number of NFS v4.1+ pNFS reads.
+ PNFSRead uint64
+ // Number of NFS v4.1+ pNFS writes.
+ PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+ // The name of the operation.
+ Operation string
+ // Number of requests performed for this operation.
+ Requests uint64
+ // Number of times an actual RPC request has been transmitted for this operation.
+ Transmissions uint64
+ // Number of times a request has had a major timeout.
+ MajorTimeouts uint64
+ // Number of bytes sent for this operation, including RPC headers and payload.
+ BytesSent uint64
+ // Number of bytes received for this operation, including RPC headers and payload.
+ BytesReceived uint64
+ // Duration all requests spent queued for transmission before they were sent.
+ CumulativeQueueTime time.Duration
+ // Duration it took to get a reply back after the request was transmitted.
+ CumulativeTotalResponseTime time.Duration
+ // Duration from when a request was enqueued to when it was completely handled.
+ CumulativeTotalRequestTime time.Duration
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+ // The transport protocol used for the NFS mount.
+ Protocol string
+ // The local port used for the NFS mount.
+ Port uint64
+ // Number of times the client has had to establish a connection from scratch
+ // to the NFS server.
+ Bind uint64
+ // Number of times the client has made a TCP connection to the NFS server.
+ Connect uint64
+ // Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+ // spent waiting for connections to the server to be established.
+ ConnectIdleTime uint64
+ // Duration since the NFS mount last saw any RPC traffic.
+ IdleTime time.Duration
+ // Number of RPC requests for this mount sent to the NFS server.
+ Sends uint64
+ // Number of RPC responses for this mount received from the NFS server.
+ Receives uint64
+ // Number of times the NFS server sent a response with a transaction ID
+ // unknown to this client.
+ BadTransactionIDs uint64
+ // A running counter, incremented on each request as the current difference
+	// between sends and receives.
+ CumulativeActiveRequests uint64
+ // A running counter, incremented on each request by the current backlog
+ // queue size.
+ CumulativeBacklog uint64
+
+ // Stats below only available with stat version 1.1.
+
+ // Maximum number of simultaneously active RPC requests ever used.
+ MaximumRPCSlotsUsed uint64
+ // A running counter, incremented on each request as the current size of the
+ // sending queue.
+ CumulativeSendingQueue uint64
+ // A running counter, incremented on each request as the current size of the
+ // pending queue.
+ CumulativePendingQueue uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) {
+ const (
+ device = "device"
+ statVersionPrefix = "statvers="
+
+ nfs3Type = "nfs"
+ nfs4Type = "nfs4"
+ )
+
+ var mounts []*Mount
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Only look for device entries in this function
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 || ss[0] != device {
+ continue
+ }
+
+ m, err := parseMount(ss)
+ if err != nil {
+ return nil, err
+ }
+
+ // Does this mount also possess statistics information?
+ if len(ss) > deviceEntryLen {
+ // Only NFSv3 and v4 are supported for parsing statistics
+ if m.Type != nfs3Type && m.Type != nfs4Type {
+ return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
+ }
+
+ statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
+
+ stats, err := parseMountStatsNFS(s, statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ m.Stats = stats
+ }
+
+ mounts = append(mounts, m)
+ }
+
+ return mounts, s.Err()
+}
+
+// parseMount parses an entry in /proc/[pid]/mountstats in the format:
+// device [device] mounted on [mount] with fstype [type]
+func parseMount(ss []string) (*Mount, error) {
+ if len(ss) < deviceEntryLen {
+ return nil, fmt.Errorf("invalid device entry: %v", ss)
+ }
+
+ // Check for specific words appearing at specific indices to ensure
+ // the format is consistent with what we expect
+ format := []struct {
+ i int
+ s string
+ }{
+ {i: 0, s: "device"},
+ {i: 2, s: "mounted"},
+ {i: 3, s: "on"},
+ {i: 5, s: "with"},
+ {i: 6, s: "fstype"},
+ }
+
+ for _, f := range format {
+ if ss[f.i] != f.s {
+ return nil, fmt.Errorf("invalid device entry: %v", ss)
+ }
+ }
+
+ return &Mount{
+ Device: ss[1],
+ Mount: ss[4],
+ Type: ss[7],
+ }, nil
+}
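+
+// An illustrative device entry in the format parseMount expects:
+//
+//	device fs.example.com:/export mounted on /mnt/nfs with fstype nfs4 statvers=1.1
+//
+// Fields 1, 4, and 7 carry the device, mount point, and fstype; the fixed
+// words at indices 0, 2, 3, 5, and 6 are checked above.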
+
+// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
+// related to NFS statistics.
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
+ // Field indicators for parsing specific types of data
+ const (
+ fieldAge = "age:"
+ fieldBytes = "bytes:"
+ fieldEvents = "events:"
+ fieldPerOpStats = "per-op"
+ fieldTransport = "xprt:"
+ )
+
+ stats := &MountStatsNFS{
+ StatVersion: statVersion,
+ }
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ break
+ }
+ if len(ss) < 2 {
+ return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ }
+
+ switch ss[0] {
+ case fieldAge:
+ // Age integer is in seconds
+ d, err := time.ParseDuration(ss[1] + "s")
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Age = d
+ case fieldBytes:
+ bstats, err := parseNFSBytesStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Bytes = *bstats
+ case fieldEvents:
+ estats, err := parseNFSEventsStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Events = *estats
+ case fieldTransport:
+ if len(ss) < 3 {
+ return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
+ }
+
+ tstats, err := parseNFSTransportStats(ss[1:], statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Transport = *tstats
+ }
+
+ // When encountering "per-operation statistics", we must break this
+ // loop and parse them separately to ensure we can terminate parsing
+		// before reaching another device entry; that is why this check is an
+		// 'if' statement rather than another switch case.
+ if ss[0] == fieldPerOpStats {
+ break
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ // NFS per-operation stats appear last before the next device entry
+ perOpStats, err := parseNFSOperationStats(s)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Operations = perOpStats
+
+ return stats, nil
+}
+
+// parseNFSBytesStats parses a NFSBytesStats line using an input set of
+// integer fields.
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
+ if len(ss) != fieldBytesLen {
+ return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
+ }
+
+ ns := make([]uint64, 0, fieldBytesLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSBytesStats{
+ Read: ns[0],
+ Write: ns[1],
+ DirectRead: ns[2],
+ DirectWrite: ns[3],
+ ReadTotal: ns[4],
+ WriteTotal: ns[5],
+ ReadPages: ns[6],
+ WritePages: ns[7],
+ }, nil
+}
+
+// parseNFSEventsStats parses a NFSEventsStats line using an input set of
+// integer fields.
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
+ if len(ss) != fieldEventsLen {
+ return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
+ }
+
+ ns := make([]uint64, 0, fieldEventsLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSEventsStats{
+ InodeRevalidate: ns[0],
+ DnodeRevalidate: ns[1],
+ DataInvalidate: ns[2],
+ AttributeInvalidate: ns[3],
+ VFSOpen: ns[4],
+ VFSLookup: ns[5],
+ VFSAccess: ns[6],
+ VFSUpdatePage: ns[7],
+ VFSReadPage: ns[8],
+ VFSReadPages: ns[9],
+ VFSWritePage: ns[10],
+ VFSWritePages: ns[11],
+ VFSGetdents: ns[12],
+ VFSSetattr: ns[13],
+ VFSFlush: ns[14],
+ VFSFsync: ns[15],
+ VFSLock: ns[16],
+ VFSFileRelease: ns[17],
+ CongestionWait: ns[18],
+ Truncation: ns[19],
+ WriteExtension: ns[20],
+ SillyRename: ns[21],
+ ShortRead: ns[22],
+ ShortWrite: ns[23],
+ JukeboxDelay: ns[24],
+ PNFSRead: ns[25],
+ PNFSWrite: ns[26],
+ }, nil
+}
+
+// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
+// additional information about per-operation statistics until an empty
+// line is reached.
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
+ const (
+ // Number of expected fields in each per-operation statistics set
+ numFields = 9
+ )
+
+ var ops []NFSOperationStats
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ // Must break when reading a blank line after per-operation stats to
+ // enable top-level function to parse the next device entry
+ break
+ }
+
+ if len(ss) != numFields {
+ return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
+ }
+
+		// Skip the string operation name; the remaining fields are integers.
+ ns := make([]uint64, 0, numFields-1)
+ for _, st := range ss[1:] {
+ n, err := strconv.ParseUint(st, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ ops = append(ops, NFSOperationStats{
+ Operation: strings.TrimSuffix(ss[0], ":"),
+ Requests: ns[0],
+ Transmissions: ns[1],
+ MajorTimeouts: ns[2],
+ BytesSent: ns[3],
+ BytesReceived: ns[4],
+ CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond,
+ CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
+ CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond,
+ })
+ }
+
+ return ops, s.Err()
+}
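+
+// An illustrative per-operation line (9 fields: the operation name plus
+// eight counters, the last three durations in milliseconds):
+//
+//	READ: 4064 4064 0 4209056 5107712 1614 29281 32108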
+
+// parseNFSTransportStats parses a NFSTransportStats line using an input set of
+// integer fields matched to a specific stats version.
+func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+ // Extract the protocol field. It is the only string value in the line
+ protocol := ss[0]
+ ss = ss[1:]
+
+ switch statVersion {
+ case statVersion10:
+ var expectedLength int
+ if protocol == "tcp" {
+ expectedLength = fieldTransport10TCPLen
+ } else if protocol == "udp" {
+ expectedLength = fieldTransport10UDPLen
+ } else {
+ return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
+ }
+ if len(ss) != expectedLength {
+ return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
+ }
+ case statVersion11:
+ var expectedLength int
+ if protocol == "tcp" {
+ expectedLength = fieldTransport11TCPLen
+ } else if protocol == "udp" {
+ expectedLength = fieldTransport11UDPLen
+ } else {
+ return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
+ }
+ if len(ss) != expectedLength {
+ return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
+ }
+ default:
+ return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
+ }
+
+ // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
+ // in a v1.0 response. Since the stat length is bigger for TCP stats, we use
+ // the TCP length here.
+ //
+ // Note: slice length must be set to length of v1.1 stats to avoid a panic when
+ // only v1.0 stats are present.
+ // See: https://github.com/prometheus/node_exporter/issues/571.
+ ns := make([]uint64, fieldTransport11TCPLen)
+ for i, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns[i] = n
+ }
+
+ // The fields differ depending on the transport protocol (TCP or UDP)
+ // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
+ //
+ // For the udp RPC transport there is no connection count, connect idle time,
+ // or idle time (fields #3, #4, and #5); all other fields are the same. So
+ // we set them to 0 here.
+ if protocol == "udp" {
+ ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+ }
+
+ return &NFSTransportStats{
+ Protocol: protocol,
+ Port: ns[0],
+ Bind: ns[1],
+ Connect: ns[2],
+ ConnectIdleTime: ns[3],
+ IdleTime: time.Duration(ns[4]) * time.Second,
+ Sends: ns[5],
+ Receives: ns[6],
+ BadTransactionIDs: ns[7],
+ CumulativeActiveRequests: ns[8],
+ CumulativeBacklog: ns[9],
+ MaximumRPCSlotsUsed: ns[10],
+ CumulativeSendingQueue: ns[11],
+ CumulativePendingQueue: ns[12],
+ }, nil
+}
diff --git a/agent/vendor/github.com/prometheus/procfs/net_dev.go b/agent/vendor/github.com/prometheus/procfs/net_dev.go
new file mode 100644
index 00000000000..3f2523371ab
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/net_dev.go
@@ -0,0 +1,216 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "errors"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// NetDevLine is a single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
+type NetDevLine struct {
+ Name string `json:"name"` // The name of the interface.
+ RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received.
+ RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received.
+ RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered.
+ RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving.
+ RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors.
+ RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors.
+ RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
+ RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver.
+ TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted.
+ TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted.
+ TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered.
+ TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting.
+ TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors.
+ TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
+ TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver.
+ TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
+}
+
+// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
+// are interface names.
+type NetDev map[string]NetDevLine
+
+// NewNetDev returns kernel/system statistics read from /proc/net/dev.
+func NewNetDev() (NetDev, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return nil, err
+ }
+
+ return fs.NewNetDev()
+}
+
+// NewNetDev returns kernel/system statistics read from /proc/net/dev.
+func (fs FS) NewNetDev() (NetDev, error) {
+ return newNetDev(fs.Path("net/dev"))
+}
+
+// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
+func (p Proc) NewNetDev() (NetDev, error) {
+ return newNetDev(p.path("net/dev"))
+}
+
+// newNetDev creates a new NetDev from the contents of the given file.
+func newNetDev(file string) (NetDev, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return NetDev{}, err
+ }
+ defer f.Close()
+
+ nd := NetDev{}
+ s := bufio.NewScanner(f)
+ for n := 0; s.Scan(); n++ {
+ // Skip the 2 header lines.
+ if n < 2 {
+ continue
+ }
+
+ line, err := nd.parseLine(s.Text())
+ if err != nil {
+ return nd, err
+ }
+
+ nd[line.Name] = *line
+ }
+
+ return nd, s.Err()
+}
+
+// parseLine parses a single line from the /proc/net/dev file. Header lines
+// must be filtered prior to calling this method.
+func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+ parts := strings.SplitN(rawLine, ":", 2)
+ if len(parts) != 2 {
+ return nil, errors.New("invalid net/dev line, missing colon")
+ }
+ fields := strings.Fields(strings.TrimSpace(parts[1]))
+
+ var err error
+ line := &NetDevLine{}
+
+ // Interface Name
+ line.Name = strings.TrimSpace(parts[0])
+ if line.Name == "" {
+ return nil, errors.New("invalid net/dev line, empty interface name")
+ }
+
+ // RX
+ line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ // TX
+ line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ return line, nil
+}
+
+// Total aggregates the values across interfaces and returns a new NetDevLine.
+// The Name field will be a sorted comma separated list of interface names.
+func (nd NetDev) Total() NetDevLine {
+ total := NetDevLine{}
+
+ names := make([]string, 0, len(nd))
+ for _, ifc := range nd {
+ names = append(names, ifc.Name)
+ total.RxBytes += ifc.RxBytes
+		total.RxPackets += ifc.RxPackets
+ total.RxErrors += ifc.RxErrors
+ total.RxDropped += ifc.RxDropped
+ total.RxFIFO += ifc.RxFIFO
+ total.RxFrame += ifc.RxFrame
+ total.RxCompressed += ifc.RxCompressed
+ total.RxMulticast += ifc.RxMulticast
+ total.TxBytes += ifc.TxBytes
+ total.TxPackets += ifc.TxPackets
+ total.TxErrors += ifc.TxErrors
+ total.TxDropped += ifc.TxDropped
+ total.TxFIFO += ifc.TxFIFO
+ total.TxCollisions += ifc.TxCollisions
+ total.TxCarrier += ifc.TxCarrier
+ total.TxCompressed += ifc.TxCompressed
+ }
+ sort.Strings(names)
+ total.Name = strings.Join(names, ", ")
+
+ return total
+}
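+
+// Illustrative usage (assumes a standard /proc mount; this sketch is not
+// part of the upstream file):
+//
+//	nd, err := procfs.NewNetDev()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	eth0 := nd["eth0"] // per-interface counters
+//	all := nd.Total()  // aggregated across all interfaces
+//	fmt.Println(all.Name, all.RxBytes, all.TxBytes)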
diff --git a/agent/vendor/github.com/prometheus/procfs/nfs/nfs.go b/agent/vendor/github.com/prometheus/procfs/nfs/nfs.go
new file mode 100644
index 00000000000..651bf681952
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/nfs/nfs.go
@@ -0,0 +1,263 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package nfs implements parsing of /proc/net/rpc/nfs and /proc/net/rpc/nfsd.
+// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
+package nfs
+
+// ReplyCache models the "rc" line.
+type ReplyCache struct {
+ Hits uint64
+ Misses uint64
+ NoCache uint64
+}
+
+// FileHandles models the "fh" line.
+type FileHandles struct {
+ Stale uint64
+ TotalLookups uint64
+ AnonLookups uint64
+ DirNoCache uint64
+ NoDirNoCache uint64
+}
+
+// InputOutput models the "io" line.
+type InputOutput struct {
+ Read uint64
+ Write uint64
+}
+
+// Threads models the "th" line.
+type Threads struct {
+ Threads uint64
+ FullCnt uint64
+}
+
+// ReadAheadCache models the "ra" line.
+type ReadAheadCache struct {
+ CacheSize uint64
+ CacheHistogram []uint64
+ NotFound uint64
+}
+
+// Network models the "net" line.
+type Network struct {
+ NetCount uint64
+ UDPCount uint64
+ TCPCount uint64
+ TCPConnect uint64
+}
+
+// ClientRPC models the nfs "rpc" line.
+type ClientRPC struct {
+ RPCCount uint64
+ Retransmissions uint64
+ AuthRefreshes uint64
+}
+
+// ServerRPC models the nfsd "rpc" line.
+type ServerRPC struct {
+ RPCCount uint64
+ BadCnt uint64
+ BadFmt uint64
+ BadAuth uint64
+ BadcInt uint64
+}
+
+// V2Stats models the "proc2" line.
+type V2Stats struct {
+ Null uint64
+ GetAttr uint64
+ SetAttr uint64
+ Root uint64
+ Lookup uint64
+ ReadLink uint64
+ Read uint64
+ WrCache uint64
+ Write uint64
+ Create uint64
+ Remove uint64
+ Rename uint64
+ Link uint64
+ SymLink uint64
+ MkDir uint64
+ RmDir uint64
+ ReadDir uint64
+ FsStat uint64
+}
+
+// V3Stats models the "proc3" line.
+type V3Stats struct {
+ Null uint64
+ GetAttr uint64
+ SetAttr uint64
+ Lookup uint64
+ Access uint64
+ ReadLink uint64
+ Read uint64
+ Write uint64
+ Create uint64
+ MkDir uint64
+ SymLink uint64
+ MkNod uint64
+ Remove uint64
+ RmDir uint64
+ Rename uint64
+ Link uint64
+ ReadDir uint64
+ ReadDirPlus uint64
+ FsStat uint64
+ FsInfo uint64
+ PathConf uint64
+ Commit uint64
+}
+
+// ClientV4Stats models the nfs "proc4" line.
+type ClientV4Stats struct {
+ Null uint64
+ Read uint64
+ Write uint64
+ Commit uint64
+ Open uint64
+ OpenConfirm uint64
+ OpenNoattr uint64
+ OpenDowngrade uint64
+ Close uint64
+ Setattr uint64
+ FsInfo uint64
+ Renew uint64
+ SetClientID uint64
+ SetClientIDConfirm uint64
+ Lock uint64
+ Lockt uint64
+ Locku uint64
+ Access uint64
+ Getattr uint64
+ Lookup uint64
+ LookupRoot uint64
+ Remove uint64
+ Rename uint64
+ Link uint64
+ Symlink uint64
+ Create uint64
+ Pathconf uint64
+ StatFs uint64
+ ReadLink uint64
+ ReadDir uint64
+ ServerCaps uint64
+ DelegReturn uint64
+ GetACL uint64
+ SetACL uint64
+ FsLocations uint64
+ ReleaseLockowner uint64
+ Secinfo uint64
+ FsidPresent uint64
+ ExchangeID uint64
+ CreateSession uint64
+ DestroySession uint64
+ Sequence uint64
+ GetLeaseTime uint64
+ ReclaimComplete uint64
+ LayoutGet uint64
+ GetDeviceInfo uint64
+ LayoutCommit uint64
+ LayoutReturn uint64
+ SecinfoNoName uint64
+ TestStateID uint64
+ FreeStateID uint64
+ GetDeviceList uint64
+ BindConnToSession uint64
+ DestroyClientID uint64
+ Seek uint64
+ Allocate uint64
+ DeAllocate uint64
+ LayoutStats uint64
+ Clone uint64
+}
+
+// ServerV4Stats models the nfsd "proc4" line.
+type ServerV4Stats struct {
+ Null uint64
+ Compound uint64
+}
+
+// V4Ops models the "proc4ops" line: NFSv4 operations
+// Variable list, see:
+// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations)
+// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations)
+// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations)
+type V4Ops struct {
+ //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct?
+ Op0Unused uint64
+ Op1Unused uint64
+ Op2Future uint64
+ Access uint64
+ Close uint64
+ Commit uint64
+ Create uint64
+ DelegPurge uint64
+ DelegReturn uint64
+ GetAttr uint64
+ GetFH uint64
+ Link uint64
+ Lock uint64
+ Lockt uint64
+ Locku uint64
+ Lookup uint64
+ LookupRoot uint64
+ Nverify uint64
+ Open uint64
+ OpenAttr uint64
+ OpenConfirm uint64
+ OpenDgrd uint64
+ PutFH uint64
+ PutPubFH uint64
+ PutRootFH uint64
+ Read uint64
+ ReadDir uint64
+ ReadLink uint64
+ Remove uint64
+ Rename uint64
+ Renew uint64
+ RestoreFH uint64
+ SaveFH uint64
+ SecInfo uint64
+ SetAttr uint64
+ Verify uint64
+ Write uint64
+ RelLockOwner uint64
+}
+
+// ClientRPCStats models all stats from /proc/net/rpc/nfs.
+type ClientRPCStats struct {
+ Network Network
+ ClientRPC ClientRPC
+ V2Stats V2Stats
+ V3Stats V3Stats
+ ClientV4Stats ClientV4Stats
+}
+
+// ServerRPCStats models all stats from /proc/net/rpc/nfsd.
+type ServerRPCStats struct {
+ ReplyCache ReplyCache
+ FileHandles FileHandles
+ InputOutput InputOutput
+ Threads Threads
+ ReadAheadCache ReadAheadCache
+ Network Network
+ ServerRPC ServerRPC
+ V2Stats V2Stats
+ V3Stats V3Stats
+ ServerV4Stats ServerV4Stats
+ V4Ops V4Ops
+}
diff --git a/agent/vendor/github.com/prometheus/procfs/nfs/parse.go b/agent/vendor/github.com/prometheus/procfs/nfs/parse.go
new file mode 100644
index 00000000000..95a83cc5bc5
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/nfs/parse.go
@@ -0,0 +1,317 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+ "fmt"
+)
+
+func parseReplyCache(v []uint64) (ReplyCache, error) {
+ if len(v) != 3 {
+ return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v)
+ }
+
+ return ReplyCache{
+ Hits: v[0],
+ Misses: v[1],
+ NoCache: v[2],
+ }, nil
+}
+
+func parseFileHandles(v []uint64) (FileHandles, error) {
+ if len(v) != 5 {
+ return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v)
+ }
+
+ return FileHandles{
+ Stale: v[0],
+ TotalLookups: v[1],
+ AnonLookups: v[2],
+ DirNoCache: v[3],
+ NoDirNoCache: v[4],
+ }, nil
+}
+
+func parseInputOutput(v []uint64) (InputOutput, error) {
+ if len(v) != 2 {
+ return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v)
+ }
+
+ return InputOutput{
+ Read: v[0],
+ Write: v[1],
+ }, nil
+}
+
+func parseThreads(v []uint64) (Threads, error) {
+ if len(v) != 2 {
+ return Threads{}, fmt.Errorf("invalid Threads line %q", v)
+ }
+
+ return Threads{
+ Threads: v[0],
+ FullCnt: v[1],
+ }, nil
+}
+
+func parseReadAheadCache(v []uint64) (ReadAheadCache, error) {
+ if len(v) != 12 {
+ return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v)
+ }
+
+ return ReadAheadCache{
+ CacheSize: v[0],
+ CacheHistogram: v[1:11],
+ NotFound: v[11],
+ }, nil
+}
+
+func parseNetwork(v []uint64) (Network, error) {
+ if len(v) != 4 {
+ return Network{}, fmt.Errorf("invalid Network line %q", v)
+ }
+
+ return Network{
+ NetCount: v[0],
+ UDPCount: v[1],
+ TCPCount: v[2],
+ TCPConnect: v[3],
+ }, nil
+}
+
+func parseServerRPC(v []uint64) (ServerRPC, error) {
+ if len(v) != 5 {
+ return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v)
+ }
+
+ return ServerRPC{
+ RPCCount: v[0],
+ BadCnt: v[1],
+ BadFmt: v[2],
+ BadAuth: v[3],
+ BadcInt: v[4],
+ }, nil
+}
+
+func parseClientRPC(v []uint64) (ClientRPC, error) {
+ if len(v) != 3 {
+ return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v)
+ }
+
+ return ClientRPC{
+ RPCCount: v[0],
+ Retransmissions: v[1],
+ AuthRefreshes: v[2],
+ }, nil
+}
+
+func parseV2Stats(v []uint64) (V2Stats, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values != 18 {
+ return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v)
+ }
+
+ return V2Stats{
+ Null: v[1],
+ GetAttr: v[2],
+ SetAttr: v[3],
+ Root: v[4],
+ Lookup: v[5],
+ ReadLink: v[6],
+ Read: v[7],
+ WrCache: v[8],
+ Write: v[9],
+ Create: v[10],
+ Remove: v[11],
+ Rename: v[12],
+ Link: v[13],
+ SymLink: v[14],
+ MkDir: v[15],
+ RmDir: v[16],
+ ReadDir: v[17],
+ FsStat: v[18],
+ }, nil
+}
+
+func parseV3Stats(v []uint64) (V3Stats, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values != 22 {
+ return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v)
+ }
+
+ return V3Stats{
+ Null: v[1],
+ GetAttr: v[2],
+ SetAttr: v[3],
+ Lookup: v[4],
+ Access: v[5],
+ ReadLink: v[6],
+ Read: v[7],
+ Write: v[8],
+ Create: v[9],
+ MkDir: v[10],
+ SymLink: v[11],
+ MkNod: v[12],
+ Remove: v[13],
+ RmDir: v[14],
+ Rename: v[15],
+ Link: v[16],
+ ReadDir: v[17],
+ ReadDirPlus: v[18],
+ FsStat: v[19],
+ FsInfo: v[20],
+ PathConf: v[21],
+ Commit: v[22],
+ }, nil
+}
+
+func parseClientV4Stats(v []uint64) (ClientV4Stats, error) {
+ values := int(v[0])
+ if len(v[1:]) != values {
+ return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v)
+ }
+
+ // This function currently supports mapping 59 NFS v4 client stats. Older
+ // kernels may emit fewer stats, so we must detect this and pad out the
+ // values to match the expected slice size.
+ if values < 59 {
+ newValues := make([]uint64, 60)
+ copy(newValues, v)
+ v = newValues
+ }
+
+ return ClientV4Stats{
+ Null: v[1],
+ Read: v[2],
+ Write: v[3],
+ Commit: v[4],
+ Open: v[5],
+ OpenConfirm: v[6],
+ OpenNoattr: v[7],
+ OpenDowngrade: v[8],
+ Close: v[9],
+ Setattr: v[10],
+ FsInfo: v[11],
+ Renew: v[12],
+ SetClientID: v[13],
+ SetClientIDConfirm: v[14],
+ Lock: v[15],
+ Lockt: v[16],
+ Locku: v[17],
+ Access: v[18],
+ Getattr: v[19],
+ Lookup: v[20],
+ LookupRoot: v[21],
+ Remove: v[22],
+ Rename: v[23],
+ Link: v[24],
+ Symlink: v[25],
+ Create: v[26],
+ Pathconf: v[27],
+ StatFs: v[28],
+ ReadLink: v[29],
+ ReadDir: v[30],
+ ServerCaps: v[31],
+ DelegReturn: v[32],
+ GetACL: v[33],
+ SetACL: v[34],
+ FsLocations: v[35],
+ ReleaseLockowner: v[36],
+ Secinfo: v[37],
+ FsidPresent: v[38],
+ ExchangeID: v[39],
+ CreateSession: v[40],
+ DestroySession: v[41],
+ Sequence: v[42],
+ GetLeaseTime: v[43],
+ ReclaimComplete: v[44],
+ LayoutGet: v[45],
+ GetDeviceInfo: v[46],
+ LayoutCommit: v[47],
+ LayoutReturn: v[48],
+ SecinfoNoName: v[49],
+ TestStateID: v[50],
+ FreeStateID: v[51],
+ GetDeviceList: v[52],
+ BindConnToSession: v[53],
+ DestroyClientID: v[54],
+ Seek: v[55],
+ Allocate: v[56],
+ DeAllocate: v[57],
+ LayoutStats: v[58],
+ Clone: v[59],
+ }, nil
+}
+
+func parseServerV4Stats(v []uint64) (ServerV4Stats, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values != 2 {
+ return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
+ }
+
+ return ServerV4Stats{
+ Null: v[1],
+ Compound: v[2],
+ }, nil
+}
+
+func parseV4Ops(v []uint64) (V4Ops, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values < 39 {
+ return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v)
+ }
+
+ stats := V4Ops{
+ Op0Unused: v[1],
+ Op1Unused: v[2],
+ Op2Future: v[3],
+ Access: v[4],
+ Close: v[5],
+ Commit: v[6],
+ Create: v[7],
+ DelegPurge: v[8],
+ DelegReturn: v[9],
+ GetAttr: v[10],
+ GetFH: v[11],
+ Link: v[12],
+ Lock: v[13],
+ Lockt: v[14],
+ Locku: v[15],
+ Lookup: v[16],
+ LookupRoot: v[17],
+ Nverify: v[18],
+ Open: v[19],
+ OpenAttr: v[20],
+ OpenConfirm: v[21],
+ OpenDgrd: v[22],
+ PutFH: v[23],
+ PutPubFH: v[24],
+ PutRootFH: v[25],
+ Read: v[26],
+ ReadDir: v[27],
+ ReadLink: v[28],
+ Remove: v[29],
+ Rename: v[30],
+ Renew: v[31],
+ RestoreFH: v[32],
+ SaveFH: v[33],
+ SecInfo: v[34],
+ SetAttr: v[35],
+ Verify: v[36],
+ Write: v[37],
+ RelLockOwner: v[38],
+ }
+
+ return stats, nil
+}
diff --git a/agent/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/agent/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go
new file mode 100644
index 00000000000..c0d3a5ad9bd
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go
@@ -0,0 +1,67 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs
+func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
+ stats := &ClientRPCStats{}
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+		parts := strings.Fields(line)
+		// Require at least a label and one value per metric line.
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("invalid NFS metric line %q", line)
+ }
+
+ values, err := util.ParseUint64s(parts[1:])
+ if err != nil {
+ return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
+ }
+
+ switch metricLine := parts[0]; metricLine {
+ case "net":
+ stats.Network, err = parseNetwork(values)
+ case "rpc":
+ stats.ClientRPC, err = parseClientRPC(values)
+ case "proc2":
+ stats.V2Stats, err = parseV2Stats(values)
+ case "proc3":
+ stats.V3Stats, err = parseV3Stats(values)
+ case "proc4":
+ stats.ClientV4Stats, err = parseClientV4Stats(values)
+ default:
+ return nil, fmt.Errorf("unknown NFS metric line %q", metricLine)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("errors parsing NFS metric line: %s", err)
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error scanning NFS file: %s", err)
+ }
+
+ return stats, nil
+}
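+
+// For reference, /proc/net/rpc/nfs begins with lines like the following
+// (illustrative values):
+//
+//	net 18628 0 18628 6
+//	rpc 4329785 0 4338291
+//
+// "net" maps to parseNetwork and "rpc" to parseClientRPC; procN lines
+// additionally lead with a count of the values that follow.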
diff --git a/agent/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/agent/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
new file mode 100644
index 00000000000..57bb4a35858
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
@@ -0,0 +1,89 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd
+func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
+ stats := &ServerRPCStats{}
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+		parts := strings.Fields(line)
+		// Require at least a label and one value per metric line.
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("invalid NFSd metric line %q", line)
+ }
+ label := parts[0]
+
+ var values []uint64
+ var err error
+ if label == "th" {
+ if len(parts) < 3 {
+ return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
+ }
+ values, err = util.ParseUint64s(parts[1:3])
+ } else {
+ values, err = util.ParseUint64s(parts[1:])
+ }
+ if err != nil {
+ return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
+ }
+
+ switch metricLine := parts[0]; metricLine {
+ case "rc":
+ stats.ReplyCache, err = parseReplyCache(values)
+ case "fh":
+ stats.FileHandles, err = parseFileHandles(values)
+ case "io":
+ stats.InputOutput, err = parseInputOutput(values)
+ case "th":
+ stats.Threads, err = parseThreads(values)
+ case "ra":
+ stats.ReadAheadCache, err = parseReadAheadCache(values)
+ case "net":
+ stats.Network, err = parseNetwork(values)
+ case "rpc":
+ stats.ServerRPC, err = parseServerRPC(values)
+ case "proc2":
+ stats.V2Stats, err = parseV2Stats(values)
+ case "proc3":
+ stats.V3Stats, err = parseV3Stats(values)
+ case "proc4":
+ stats.ServerV4Stats, err = parseServerV4Stats(values)
+ case "proc4ops":
+ stats.V4Ops, err = parseV4Ops(values)
+ default:
+ return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err)
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error scanning NFSd file: %s", err)
+ }
+
+ return stats, nil
+}
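+
+// The "th" special case above exists because the nfsd threads line mixes
+// integers and floats, e.g. (illustrative):
+//
+//	th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
+//
+// Only the two leading integer fields are parseable as uint64, so just
+// parts[1:3] is handed to ParseUint64s.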
diff --git a/agent/vendor/github.com/prometheus/procfs/proc.go b/agent/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 00000000000..06bed0ef4a3
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,258 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+ // The process ID.
+ PID int
+
+ fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int { return len(p) }
+func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process read via /proc/self.
+func Self() (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.Self()
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllProcs()
+}
+
+// Self returns a process for the current process.
+func (fs FS) Self() (Proc, error) {
+ p, err := os.Readlink(fs.Path("self"))
+ if err != nil {
+ return Proc{}, err
+ }
+ pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// NewProc returns a process for the given pid.
+func (fs FS) NewProc(pid int) (Proc, error) {
+ if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+ d, err := os.Open(fs.Path())
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ p := Procs{}
+ for _, n := range names {
+ pid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ p = append(p, Proc{PID: int(pid), fs: fs})
+ }
+
+ return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+ f, err := os.Open(p.path("cmdline"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(data) < 1 {
+ return []string{}, nil
+ }
+
+ return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
+}
+
+// Comm returns the command name of a process.
+func (p Proc) Comm() (string, error) {
+ f, err := os.Open(p.path("comm"))
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(string(data)), nil
+}
+
+// Executable returns the absolute path of the executable command of a process.
+func (p Proc) Executable() (string, error) {
+ exe, err := os.Readlink(p.path("exe"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return exe, err
+}
+
+// Cwd returns the absolute path to the current working directory of the process.
+func (p Proc) Cwd() (string, error) {
+ wd, err := os.Readlink(p.path("cwd"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return wd, err
+}
+
+// RootDir returns the absolute path to the process's root directory (as set by chroot)
+func (p Proc) RootDir() (string, error) {
+ rdir, err := os.Readlink(p.path("root"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return rdir, err
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ fds := make([]uintptr, len(names))
+ for i, n := range names {
+ fd, err := strconv.ParseInt(n, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+ }
+ fds[i] = uintptr(fd)
+ }
+
+ return fds, nil
+}
+
+// FileDescriptorTargets returns the targets of all file descriptors of a process.
+// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ targets := make([]string, len(names))
+
+ for i, name := range names {
+ target, err := os.Readlink(p.path("fd", name))
+ if err == nil {
+ targets[i] = target
+ }
+ }
+
+ return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+ fds, err := p.fileDescriptors()
+ if err != nil {
+ return 0, err
+ }
+
+ return len(fds), nil
+}
+
+// MountStats retrieves statistics and configuration for mount points in a
+// process's namespace.
+func (p Proc) MountStats() ([]*Mount, error) {
+ f, err := os.Open(p.path("mountstats"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseMountStats(f)
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+ d, err := os.Open(p.path("fd"))
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+ return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
diff --git a/agent/vendor/github.com/prometheus/procfs/proc_io.go b/agent/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 00000000000..0251c83bfe8
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,65 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// ProcIO models the content of /proc//io.
+type ProcIO struct {
+ // Chars read.
+ RChar uint64
+ // Chars written.
+ WChar uint64
+ // Read syscalls.
+ SyscR uint64
+ // Write syscalls.
+ SyscW uint64
+ // Bytes read.
+ ReadBytes uint64
+ // Bytes written.
+ WriteBytes uint64
+ // Bytes written, but taking into account truncation. See
+ // Documentation/filesystems/proc.txt in the kernel sources for
+ // detailed explanation.
+ CancelledWriteBytes int64
+}
+
+// NewIO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) NewIO() (ProcIO, error) {
+ pio := ProcIO{}
+
+ f, err := os.Open(p.path("io"))
+ if err != nil {
+ return pio, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return pio, err
+ }
+
+ ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+ "read_bytes: %d\nwrite_bytes: %d\n" +
+ "cancelled_write_bytes: %d\n"
+
+ _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+ &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+
+ return pio, err
+}
diff --git a/agent/vendor/github.com/prometheus/procfs/proc_limits.go b/agent/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 00000000000..f04ba6fda85
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,150 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
+type ProcLimits struct {
+ // CPU time limit in seconds.
+ CPUTime int64
+ // Maximum size of files that the process may create.
+ FileSize int64
+ // Maximum size of the process's data segment (initialized data,
+ // uninitialized data, and heap).
+ DataSize int64
+ // Maximum size of the process stack in bytes.
+ StackSize int64
+ // Maximum size of a core file.
+ CoreFileSize int64
+ // Limit of the process's resident set in pages.
+ ResidentSet int64
+ // Maximum number of processes that can be created for the real user ID of
+ // the calling process.
+ Processes int64
+ // Value one greater than the maximum file descriptor number that can be
+ // opened by this process.
+ OpenFiles int64
+ // Maximum number of bytes of memory that may be locked into RAM.
+ LockedMemory int64
+ // Maximum size of the process's virtual memory address space in bytes.
+ AddressSpace int64
+ // Limit on the combined number of flock(2) locks and fcntl(2) leases that
+ // this process may establish.
+ FileLocks int64
+ // Limit of signals that may be queued for the real user ID of the calling
+ // process.
+ PendingSignals int64
+ // Limit on the number of bytes that can be allocated for POSIX message
+ // queues for the real user ID of the calling process.
+ MsqqueueSize int64
+ // Limit of the nice priority set using setpriority(2) or nice(2).
+ NicePriority int64
+ // Limit of the real-time priority set using sched_setscheduler(2) or
+ // sched_setparam(2).
+ RealtimePriority int64
+ // Limit (in microseconds) on the amount of CPU time that a process
+ // scheduled under a real-time scheduling policy may consume without making
+ // a blocking system call.
+ RealtimeTimeout int64
+}
+
+const (
+ limitsFields = 3
+ limitsUnlimited = "unlimited"
+)
+
+var (
+ limitsDelimiter = regexp.MustCompile(" +")
+)
+
+// NewLimits returns the current soft limits of the process.
+func (p Proc) NewLimits() (ProcLimits, error) {
+ f, err := os.Open(p.path("limits"))
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ defer f.Close()
+
+ var (
+ l = ProcLimits{}
+ s = bufio.NewScanner(f)
+ )
+ for s.Scan() {
+ fields := limitsDelimiter.Split(s.Text(), limitsFields)
+ if len(fields) != limitsFields {
+ return ProcLimits{}, fmt.Errorf(
+ "couldn't parse %s line %s", f.Name(), s.Text())
+ }
+
+ switch fields[0] {
+ case "Max cpu time":
+ l.CPUTime, err = parseInt(fields[1])
+ case "Max file size":
+ l.FileSize, err = parseInt(fields[1])
+ case "Max data size":
+ l.DataSize, err = parseInt(fields[1])
+ case "Max stack size":
+ l.StackSize, err = parseInt(fields[1])
+ case "Max core file size":
+ l.CoreFileSize, err = parseInt(fields[1])
+ case "Max resident set":
+ l.ResidentSet, err = parseInt(fields[1])
+ case "Max processes":
+ l.Processes, err = parseInt(fields[1])
+ case "Max open files":
+ l.OpenFiles, err = parseInt(fields[1])
+ case "Max locked memory":
+ l.LockedMemory, err = parseInt(fields[1])
+ case "Max address space":
+ l.AddressSpace, err = parseInt(fields[1])
+ case "Max file locks":
+ l.FileLocks, err = parseInt(fields[1])
+ case "Max pending signals":
+ l.PendingSignals, err = parseInt(fields[1])
+ case "Max msgqueue size":
+ l.MsqqueueSize, err = parseInt(fields[1])
+ case "Max nice priority":
+ l.NicePriority, err = parseInt(fields[1])
+ case "Max realtime priority":
+ l.RealtimePriority, err = parseInt(fields[1])
+ case "Max realtime timeout":
+ l.RealtimeTimeout, err = parseInt(fields[1])
+ }
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ }
+
+ return l, s.Err()
+}
+
+func parseInt(s string) (int64, error) {
+ if s == limitsUnlimited {
+ return -1, nil
+ }
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+ }
+ return i, nil
+}
diff --git a/agent/vendor/github.com/prometheus/procfs/proc_ns.go b/agent/vendor/github.com/prometheus/procfs/proc_ns.go
new file mode 100644
index 00000000000..d06c26ebad9
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Namespace represents a single namespace of a process.
+type Namespace struct {
+ Type string // Namespace type.
+ Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
+}
+
+// Namespaces contains all of the namespaces that the process is contained in.
+type Namespaces map[string]Namespace
+
+// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the
+// process is a member.
+func (p Proc) NewNamespaces() (Namespaces, error) {
+ d, err := os.Open(p.path("ns"))
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read contents of ns dir: %v", err)
+ }
+
+ ns := make(Namespaces, len(names))
+ for _, name := range names {
+ target, err := os.Readlink(p.path("ns", name))
+ if err != nil {
+ return nil, err
+ }
+
+ fields := strings.SplitN(target, ":", 2)
+ if len(fields) != 2 {
+ return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target)
+ }
+
+ typ := fields[0]
+ inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err)
+ }
+
+ ns[name] = Namespace{typ, uint32(inode)}
+ }
+
+ return ns, nil
+}
diff --git a/agent/vendor/github.com/prometheus/procfs/proc_stat.go b/agent/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 00000000000..3cf2a9f18f0
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,188 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
+// which required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic. After
+// much research it was determined that USER_HZ is actually hardcoded to 100 on
+// all Go-supported platforms as of the time of this writing. This is why we
+// decided to hardcode it here as well. It is not impossible that there could
+// be systems with exceptions, but they should be very exotic edge cases, and
+// in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+ // The process ID.
+ PID int
+ // The filename of the executable.
+ Comm string
+ // The process state.
+ State string
+ // The PID of the parent of this process.
+ PPID int
+ // The process group ID of the process.
+ PGRP int
+ // The session ID of the process.
+ Session int
+ // The controlling terminal of the process.
+ TTY int
+ // The ID of the foreground process group of the controlling terminal of
+ // the process.
+ TPGID int
+ // The kernel flags word of the process.
+ Flags uint
+ // The number of minor faults the process has made which have not required
+ // loading a memory page from disk.
+ MinFlt uint
+ // The number of minor faults that the process's waited-for children have
+ // made.
+ CMinFlt uint
+ // The number of major faults the process has made which have required
+ // loading a memory page from disk.
+ MajFlt uint
+ // The number of major faults that the process's waited-for children have
+ // made.
+ CMajFlt uint
+ // Amount of time that this process has been scheduled in user mode,
+ // measured in clock ticks.
+ UTime uint
+ // Amount of time that this process has been scheduled in kernel mode,
+ // measured in clock ticks.
+ STime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in user mode, measured in clock ticks.
+ CUTime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in kernel mode, measured in clock ticks.
+ CSTime uint
+ // For processes running a real-time scheduling policy, this is the negated
+ // scheduling priority, minus one.
+ Priority int
+ // The nice value, a value in the range 19 (low priority) to -20 (high
+ // priority).
+ Nice int
+ // Number of threads in this process.
+ NumThreads int
+ // The time the process started after system boot, the value is expressed
+ // in clock ticks.
+ Starttime uint64
+ // Virtual memory size in bytes.
+ VSize int
+ // Resident set size in pages.
+ RSS int
+
+ fs FS
+}
+
+// NewStat returns the current status information of the process.
+func (p Proc) NewStat() (ProcStat, error) {
+ f, err := os.Open(p.path("stat"))
+ if err != nil {
+ return ProcStat{}, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ var (
+ ignore int
+
+ s = ProcStat{PID: p.PID, fs: p.fs}
+ l = bytes.Index(data, []byte("("))
+ r = bytes.LastIndex(data, []byte(")"))
+ )
+
+ if l < 0 || r < 0 {
+ return ProcStat{}, fmt.Errorf(
+ "unexpected format, couldn't extract comm: %s",
+ data,
+ )
+ }
+
+ s.Comm = string(data[l+1 : r])
+ _, err = fmt.Fscan(
+ bytes.NewBuffer(data[r+2:]),
+ &s.State,
+ &s.PPID,
+ &s.PGRP,
+ &s.Session,
+ &s.TTY,
+ &s.TPGID,
+ &s.Flags,
+ &s.MinFlt,
+ &s.CMinFlt,
+ &s.MajFlt,
+ &s.CMajFlt,
+ &s.UTime,
+ &s.STime,
+ &s.CUTime,
+ &s.CSTime,
+ &s.Priority,
+ &s.Nice,
+ &s.NumThreads,
+ &ignore,
+ &s.Starttime,
+ &s.VSize,
+ &s.RSS,
+ )
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() int {
+ return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+ return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp of the process in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+ stat, err := s.fs.NewStat()
+ if err != nil {
+ return 0, err
+ }
+ return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+ return float64(s.UTime+s.STime) / userHZ
+}
diff --git a/agent/vendor/github.com/prometheus/procfs/stat.go b/agent/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 00000000000..61eb6b0e3ce
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,232 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// CPUStat shows how much time the cpu spend in various stages.
+type CPUStat struct {
+ User float64
+ Nice float64
+ System float64
+ Idle float64
+ Iowait float64
+ IRQ float64
+ SoftIRQ float64
+ Steal float64
+ Guest float64
+ GuestNice float64
+}
+
+// SoftIRQStat represent the softirq statistics as exported in the procfs stat file.
+// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
+// It is possible to get per-cpu stats by reading /proc/softirqs
+type SoftIRQStat struct {
+ Hi uint64
+ Timer uint64
+ NetTx uint64
+ NetRx uint64
+ Block uint64
+ BlockIoPoll uint64
+ Tasklet uint64
+ Sched uint64
+ Hrtimer uint64
+ Rcu uint64
+}
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+ // Boot time in seconds since the Epoch.
+ BootTime uint64
+ // Summed up cpu statistics.
+ CPUTotal CPUStat
+ // Per-CPU statistics.
+ CPU []CPUStat
+ // Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
+ IRQTotal uint64
+ // Number of times a numbered IRQ was triggered.
+ IRQ []uint64
+ // Number of times a context switch happened.
+ ContextSwitches uint64
+ // Number of times a process was created.
+ ProcessCreated uint64
+ // Number of processes currently running.
+ ProcessesRunning uint64
+ // Number of processes currently blocked (waiting for IO).
+ ProcessesBlocked uint64
+ // Number of times a softirq was scheduled.
+ SoftIRQTotal uint64
+ // Detailed softirq statistics.
+ SoftIRQ SoftIRQStat
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Stat{}, err
+ }
+
+ return fs.NewStat()
+}
+
+// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
+func parseCPUStat(line string) (CPUStat, int64, error) {
+ cpuStat := CPUStat{}
+ var cpu string
+
+ count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
+ &cpu,
+ &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
+ &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
+ &cpuStat.Guest, &cpuStat.GuestNice)
+
+ if err != nil && err != io.EOF {
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
+ }
+ if count == 0 {
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
+ }
+
+ cpuStat.User /= userHZ
+ cpuStat.Nice /= userHZ
+ cpuStat.System /= userHZ
+ cpuStat.Idle /= userHZ
+ cpuStat.Iowait /= userHZ
+ cpuStat.IRQ /= userHZ
+ cpuStat.SoftIRQ /= userHZ
+ cpuStat.Steal /= userHZ
+ cpuStat.Guest /= userHZ
+ cpuStat.GuestNice /= userHZ
+
+ if cpu == "cpu" {
+ return cpuStat, -1, nil
+ }
+
+ cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
+ if err != nil {
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
+ }
+
+ return cpuStat, cpuID, nil
+}
+
+// Parse a softirq line.
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
+ softIRQStat := SoftIRQStat{}
+ var total uint64
+ var prefix string
+
+ _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
+ &prefix, &total,
+ &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
+ &softIRQStat.Block, &softIRQStat.BlockIoPoll,
+ &softIRQStat.Tasklet, &softIRQStat.Sched,
+ &softIRQStat.Hrtimer, &softIRQStat.Rcu)
+
+ if err != nil {
+ return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
+ }
+
+ return softIRQStat, total, nil
+}
+
+// NewStat returns an information about current kernel/system statistics.
+func (fs FS) NewStat() (Stat, error) {
+ // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+
+ f, err := os.Open(fs.Path("stat"))
+ if err != nil {
+ return Stat{}, err
+ }
+ defer f.Close()
+
+ stat := Stat{}
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Fields(scanner.Text())
+ // require at least
+ if len(parts) < 2 {
+ continue
+ }
+ switch {
+ case parts[0] == "btime":
+ if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
+ }
+ case parts[0] == "intr":
+ if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
+ }
+ numberedIRQs := parts[2:]
+ stat.IRQ = make([]uint64, len(numberedIRQs))
+ for i, count := range numberedIRQs {
+ if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
+ }
+ }
+ case parts[0] == "ctxt":
+ if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
+ }
+ case parts[0] == "processes":
+ if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
+ }
+ case parts[0] == "procs_running":
+ if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
+ }
+ case parts[0] == "procs_blocked":
+ if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
+ }
+ case parts[0] == "softirq":
+ softIRQStats, total, err := parseSoftIRQStat(line)
+ if err != nil {
+ return Stat{}, err
+ }
+ stat.SoftIRQTotal = total
+ stat.SoftIRQ = softIRQStats
+ case strings.HasPrefix(parts[0], "cpu"):
+ cpuStat, cpuID, err := parseCPUStat(line)
+ if err != nil {
+ return Stat{}, err
+ }
+ if cpuID == -1 {
+ stat.CPUTotal = cpuStat
+ } else {
+ for int64(len(stat.CPU)) <= cpuID {
+ stat.CPU = append(stat.CPU, CPUStat{})
+ }
+ stat.CPU[cpuID] = cpuStat
+ }
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+ }
+
+ return stat, nil
+}
diff --git a/agent/vendor/github.com/prometheus/procfs/ttar b/agent/vendor/github.com/prometheus/procfs/ttar
new file mode 100755
index 00000000000..b0171a12b59
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/ttar
@@ -0,0 +1,389 @@
+#!/usr/bin/env bash
+
+# Purpose: plain text tar format
+# Limitations: - only suitable for text files, directories, and symlinks
+# - stores only filename, content, and mode
+# - not designed for untrusted input
+#
+# Note: must work with bash version 3.2 (macOS)
+
+# Copyright 2017 Roger Luethi
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit -o nounset
+
+# Sanitize environment (for instance, standard sorting of glob matches)
+export LC_ALL=C
+
+path=""
+CMD=""
+ARG_STRING="$*"
+
+#------------------------------------------------------------------------------
+# Not all sed implementations can work on null bytes. In order to make ttar
+# work out of the box on macOS, use Python as a stream editor.
+
+USE_PYTHON=0
+
+PYTHON_CREATE_FILTER=$(cat << 'PCF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+ line = re.sub(r'EOF', r'\EOF', line)
+ line = re.sub(r'NULLBYTE', r'\NULLBYTE', line)
+ line = re.sub('\x00', r'NULLBYTE', line)
+ sys.stdout.write(line)
+PCF
+)
+
+PYTHON_EXTRACT_FILTER=$(cat << 'PEF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+ line = re.sub(r'(?/dev/null; then
+ echo "ERROR Python not found. Aborting."
+ exit 2
+ fi
+ USE_PYTHON=1
+ fi
+}
+
+#------------------------------------------------------------------------------
+
+function usage {
+ bname=$(basename "$0")
+ cat << USAGE
+Usage: $bname [-C ] -c -f (create archive)
+ $bname -t -f (list archive contents)
+ $bname [-C ] -x -f (extract archive)
+
+Options:
+ -C (change directory)
+ -v (verbose)
+
+Example: Change to sysfs directory, create ttar file from fixtures directory
+ $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
+USAGE
+exit "$1"
+}
+
+function vecho {
+ if [ "${VERBOSE:-}" == "yes" ]; then
+ echo >&7 "$@"
+ fi
+}
+
+function set_cmd {
+ if [ -n "$CMD" ]; then
+ echo "ERROR: more than one command given"
+ echo
+ usage 2
+ fi
+ CMD=$1
+}
+
+unset VERBOSE
+
+while getopts :cf:htxvC: opt; do
+ case $opt in
+ c)
+ set_cmd "create"
+ ;;
+ f)
+ ARCHIVE=$OPTARG
+ ;;
+ h)
+ usage 0
+ ;;
+ t)
+ set_cmd "list"
+ ;;
+ x)
+ set_cmd "extract"
+ ;;
+ v)
+ VERBOSE=yes
+ exec 7>&1
+ ;;
+ C)
+ CDIR=$OPTARG
+ ;;
+ *)
+ echo >&2 "ERROR: invalid option -$OPTARG"
+ echo
+ usage 1
+ ;;
+ esac
+done
+
+# Remove processed options from arguments
+shift $(( OPTIND - 1 ));
+
+if [ "${CMD:-}" == "" ]; then
+ echo >&2 "ERROR: no command given"
+ echo
+ usage 1
+elif [ "${ARCHIVE:-}" == "" ]; then
+ echo >&2 "ERROR: no archive name given"
+ echo
+ usage 1
+fi
+
+function list {
+ local path=""
+ local size=0
+ local line_no=0
+ local ttar_file=$1
+ if [ -n "${2:-}" ]; then
+ echo >&2 "ERROR: too many arguments."
+ echo
+ usage 1
+ fi
+ if [ ! -e "$ttar_file" ]; then
+ echo >&2 "ERROR: file not found ($ttar_file)"
+ echo
+ usage 1
+ fi
+ while read -r line; do
+ line_no=$(( line_no + 1 ))
+ if [ $size -gt 0 ]; then
+ size=$(( size - 1 ))
+ continue
+ fi
+ if [[ $line =~ ^Path:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+ size=${BASH_REMATCH[1]}
+ echo "$path"
+ elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ echo "$path/"
+ elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+ echo "$path -> ${BASH_REMATCH[1]}"
+ fi
+ done < "$ttar_file"
+}
+
+function extract {
+ local path=""
+ local size=0
+ local line_no=0
+ local ttar_file=$1
+ if [ -n "${2:-}" ]; then
+ echo >&2 "ERROR: too many arguments."
+ echo
+ usage 1
+ fi
+ if [ ! -e "$ttar_file" ]; then
+ echo >&2 "ERROR: file not found ($ttar_file)"
+ echo
+ usage 1
+ fi
+ while IFS= read -r line; do
+ line_no=$(( line_no + 1 ))
+ local eof_without_newline
+ if [ "$size" -gt 0 ]; then
+ if [[ "$line" =~ [^\\]EOF ]]; then
+ # An EOF not preceeded by a backslash indicates that the line
+ # does not end with a newline
+ eof_without_newline=1
+ else
+ eof_without_newline=0
+ fi
+ # Replace NULLBYTE with null byte if at beginning of line
+ # Replace NULLBYTE with null byte unless preceeded by backslash
+ # Remove one backslash in front of NULLBYTE (if any)
+ # Remove EOF unless preceeded by backslash
+ # Remove one backslash in front of EOF
+ if [ $USE_PYTHON -eq 1 ]; then
+ echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
+ else
+ # The repeated pattern makes up for sed's lack of negative
+ # lookbehind assertions (for consecutive null bytes).
+ echo -n "$line" | \
+ sed -e 's/^NULLBYTE/\x0/g;
+ s/\([^\\]\)NULLBYTE/\1\x0/g;
+ s/\([^\\]\)NULLBYTE/\1\x0/g;
+ s/\\NULLBYTE/NULLBYTE/g;
+ s/\([^\\]\)EOF/\1/g;
+ s/\\EOF/EOF/g;
+ ' >> "$path"
+ fi
+ if [[ "$eof_without_newline" -eq 0 ]]; then
+ echo >> "$path"
+ fi
+ size=$(( size - 1 ))
+ continue
+ fi
+ if [[ $line =~ ^Path:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ if [ -e "$path" ] || [ -L "$path" ]; then
+ rm "$path"
+ fi
+ elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+ size=${BASH_REMATCH[1]}
+ # Create file even if it is zero-length.
+ touch "$path"
+ vecho " $path"
+ elif [[ $line =~ ^Mode:\ (.*)$ ]]; then
+ mode=${BASH_REMATCH[1]}
+ chmod "$mode" "$path"
+ vecho "$mode"
+ elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ mkdir -p "$path"
+ vecho " $path/"
+ elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+ ln -s "${BASH_REMATCH[1]}" "$path"
+ vecho " $path -> ${BASH_REMATCH[1]}"
+ elif [[ $line =~ ^# ]]; then
+ # Ignore comments between files
+ continue
+ else
+ echo >&2 "ERROR: Unknown keyword on line $line_no: $line"
+ exit 1
+ fi
+ done < "$ttar_file"
+}
+
+function div {
+ echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \
+ "- - - - - -"
+}
+
+function get_mode {
+ local mfile=$1
+ if [ -z "${STAT_OPTION:-}" ]; then
+ if stat -c '%a' "$mfile" >/dev/null 2>&1; then
+ # GNU stat
+ STAT_OPTION='-c'
+ STAT_FORMAT='%a'
+ else
+ # BSD stat
+ STAT_OPTION='-f'
+ # Octal output, user/group/other (omit file type, sticky bit)
+ STAT_FORMAT='%OLp'
+ fi
+ fi
+ stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile"
+}
+
+function _create {
+ shopt -s nullglob
+ local mode
+ local eof_without_newline
+ while (( "$#" )); do
+ file=$1
+ if [ -L "$file" ]; then
+ echo "Path: $file"
+ symlinkTo=$(readlink "$file")
+ echo "SymlinkTo: $symlinkTo"
+ vecho " $file -> $symlinkTo"
+ div
+ elif [ -d "$file" ]; then
+ # Strip trailing slash (if there is one)
+ file=${file%/}
+ echo "Directory: $file"
+ mode=$(get_mode "$file")
+ echo "Mode: $mode"
+ vecho "$mode $file/"
+ div
+ # Find all files and dirs, including hidden/dot files
+ for x in "$file/"{*,.[^.]*}; do
+ _create "$x"
+ done
+ elif [ -f "$file" ]; then
+ echo "Path: $file"
+ lines=$(wc -l "$file"|awk '{print $1}')
+ eof_without_newline=0
+ if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \
+ [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then
+ eof_without_newline=1
+ lines=$((lines+1))
+ fi
+ echo "Lines: $lines"
+ # Add backslash in front of EOF
+ # Add backslash in front of NULLBYTE
+ # Replace null byte with NULLBYTE
+ if [ $USE_PYTHON -eq 1 ]; then
+ < "$file" python -c "$PYTHON_CREATE_FILTER"
+ else
+ < "$file" \
+ sed 's/EOF/\\EOF/g;
+ s/NULLBYTE/\\NULLBYTE/g;
+ s/\x0/NULLBYTE/g;
+ '
+ fi
+ if [[ "$eof_without_newline" -eq 1 ]]; then
+ # Finish line with EOF to indicate that the original line did
+ # not end with a linefeed
+ echo "EOF"
+ fi
+ mode=$(get_mode "$file")
+ echo "Mode: $mode"
+ vecho "$mode $file"
+ div
+ else
+ echo >&2 "ERROR: file not found ($file in $(pwd))"
+ exit 2
+ fi
+ shift
+ done
+}
+
+function create {
+ ttar_file=$1
+ shift
+ if [ -z "${1:-}" ]; then
+ echo >&2 "ERROR: missing arguments."
+ echo
+ usage 1
+ fi
+ if [ -e "$ttar_file" ]; then
+ rm "$ttar_file"
+ fi
+ exec > "$ttar_file"
+ echo "# Archive created by ttar $ARG_STRING"
+ _create "$@"
+}
+
+test_environment
+
+if [ -n "${CDIR:-}" ]; then
+ if [[ "$ARCHIVE" != /* ]]; then
+ # Relative path: preserve the archive's location before changing
+ # directory
+ ARCHIVE="$(pwd)/$ARCHIVE"
+ fi
+ cd "$CDIR"
+fi
+
+"$CMD" "$ARCHIVE" "$@"
diff --git a/agent/vendor/github.com/prometheus/procfs/xfrm.go b/agent/vendor/github.com/prometheus/procfs/xfrm.go
new file mode 100644
index 00000000000..8f1508f0fd1
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/xfrm.go
@@ -0,0 +1,187 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
+type XfrmStat struct {
+ // All errors which are not matched by other
+ XfrmInError int
+ // No buffer is left
+ XfrmInBufferError int
+ // Header Error
+ XfrmInHdrError int
+ // No state found
+ // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+ XfrmInNoStates int
+ // Transformation protocol specific error
+ // e.g. SA Key is wrong
+ XfrmInStateProtoError int
+ // Transformation mode specific error
+ XfrmInStateModeError int
+ // Sequence error
+ // e.g. sequence number is out of window
+ XfrmInStateSeqError int
+ // State is expired
+ XfrmInStateExpired int
+ // State has mismatch option
+ // e.g. UDP encapsulation type is mismatched
+ XfrmInStateMismatch int
+ // State is invalid
+ XfrmInStateInvalid int
+ // No matching template for states
+ // e.g. Inbound SAs are correct but SP rule is wrong
+ XfrmInTmplMismatch int
+ // No policy is found for states
+ // e.g. Inbound SAs are correct but no SP is found
+ XfrmInNoPols int
+ // Policy discards
+ XfrmInPolBlock int
+ // Policy error
+ XfrmInPolError int
+ // All errors which are not matched by others
+ XfrmOutError int
+ // Bundle generation error
+ XfrmOutBundleGenError int
+ // Bundle check error
+ XfrmOutBundleCheckError int
+ // No state was found
+ XfrmOutNoStates int
+ // Transformation protocol specific error
+ XfrmOutStateProtoError int
+ // Transportation mode specific error
+ XfrmOutStateModeError int
+ // Sequence error
+ // i.e sequence number overflow
+ XfrmOutStateSeqError int
+ // State is expired
+ XfrmOutStateExpired int
+ // Policy discads
+ XfrmOutPolBlock int
+ // Policy is dead
+ XfrmOutPolDead int
+ // Policy Error
+ XfrmOutPolError int
+ XfrmFwdHdrError int
+ XfrmOutStateInvalid int
+ XfrmAcquireError int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
+func (fs FS) NewXfrmStat() (XfrmStat, error) {
+ file, err := os.Open(fs.Path("net/xfrm_stat"))
+ if err != nil {
+ return XfrmStat{}, err
+ }
+ defer file.Close()
+
+ var (
+ x = XfrmStat{}
+ s = bufio.NewScanner(file)
+ )
+
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+
+ if len(fields) != 2 {
+ return XfrmStat{}, fmt.Errorf(
+ "couldn't parse %s line %s", file.Name(), s.Text())
+ }
+
+ name := fields[0]
+ value, err := strconv.Atoi(fields[1])
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ switch name {
+ case "XfrmInError":
+ x.XfrmInError = value
+ case "XfrmInBufferError":
+ x.XfrmInBufferError = value
+ case "XfrmInHdrError":
+ x.XfrmInHdrError = value
+ case "XfrmInNoStates":
+ x.XfrmInNoStates = value
+ case "XfrmInStateProtoError":
+ x.XfrmInStateProtoError = value
+ case "XfrmInStateModeError":
+ x.XfrmInStateModeError = value
+ case "XfrmInStateSeqError":
+ x.XfrmInStateSeqError = value
+ case "XfrmInStateExpired":
+ x.XfrmInStateExpired = value
+ case "XfrmInStateInvalid":
+ x.XfrmInStateInvalid = value
+ case "XfrmInTmplMismatch":
+ x.XfrmInTmplMismatch = value
+ case "XfrmInNoPols":
+ x.XfrmInNoPols = value
+ case "XfrmInPolBlock":
+ x.XfrmInPolBlock = value
+ case "XfrmInPolError":
+ x.XfrmInPolError = value
+ case "XfrmOutError":
+ x.XfrmOutError = value
+ case "XfrmInStateMismatch":
+ x.XfrmInStateMismatch = value
+ case "XfrmOutBundleGenError":
+ x.XfrmOutBundleGenError = value
+ case "XfrmOutBundleCheckError":
+ x.XfrmOutBundleCheckError = value
+ case "XfrmOutNoStates":
+ x.XfrmOutNoStates = value
+ case "XfrmOutStateProtoError":
+ x.XfrmOutStateProtoError = value
+ case "XfrmOutStateModeError":
+ x.XfrmOutStateModeError = value
+ case "XfrmOutStateSeqError":
+ x.XfrmOutStateSeqError = value
+ case "XfrmOutStateExpired":
+ x.XfrmOutStateExpired = value
+ case "XfrmOutPolBlock":
+ x.XfrmOutPolBlock = value
+ case "XfrmOutPolDead":
+ x.XfrmOutPolDead = value
+ case "XfrmOutPolError":
+ x.XfrmOutPolError = value
+ case "XfrmFwdHdrError":
+ x.XfrmFwdHdrError = value
+ case "XfrmOutStateInvalid":
+ x.XfrmOutStateInvalid = value
+ case "XfrmAcquireError":
+ x.XfrmAcquireError = value
+ }
+
+ }
+
+ return x, s.Err()
+}
diff --git a/agent/vendor/github.com/prometheus/procfs/xfs/parse.go b/agent/vendor/github.com/prometheus/procfs/xfs/parse.go
new file mode 100644
index 00000000000..2bc0ef3427d
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/xfs/parse.go
@@ -0,0 +1,330 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package xfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ParseStats parses a Stats from an input io.Reader, using the format
+// found in /proc/fs/xfs/stat.
+func ParseStats(r io.Reader) (*Stats, error) {
+ const (
+ // Fields parsed into stats structures.
+ fieldExtentAlloc = "extent_alloc"
+ fieldAbt = "abt"
+ fieldBlkMap = "blk_map"
+ fieldBmbt = "bmbt"
+ fieldDir = "dir"
+ fieldTrans = "trans"
+ fieldIg = "ig"
+ fieldLog = "log"
+ fieldRw = "rw"
+ fieldAttr = "attr"
+ fieldIcluster = "icluster"
+ fieldVnodes = "vnodes"
+ fieldBuf = "buf"
+ fieldXpc = "xpc"
+
+ // Unimplemented at this time due to lack of documentation.
+ fieldPushAil = "push_ail"
+ fieldXstrat = "xstrat"
+ fieldAbtb2 = "abtb2"
+ fieldAbtc2 = "abtc2"
+ fieldBmbt2 = "bmbt2"
+ fieldIbt2 = "ibt2"
+ fieldFibt2 = "fibt2"
+ fieldQm = "qm"
+ fieldDebug = "debug"
+ )
+
+ var xfss Stats
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Expect at least a string label and a single integer value, ex:
+ // - abt 0
+ // - rw 1 2
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) < 2 {
+ continue
+ }
+ label := ss[0]
+
+ // Extended precision counters are uint64 values.
+ if label == fieldXpc {
+ us, err := util.ParseUint64s(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
+ if err != nil {
+ return nil, err
+ }
+
+ continue
+ }
+
+ // All other counters are uint32 values.
+ us, err := util.ParseUint32s(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ switch label {
+ case fieldExtentAlloc:
+ xfss.ExtentAllocation, err = extentAllocationStats(us)
+ case fieldAbt:
+ xfss.AllocationBTree, err = btreeStats(us)
+ case fieldBlkMap:
+ xfss.BlockMapping, err = blockMappingStats(us)
+ case fieldBmbt:
+ xfss.BlockMapBTree, err = btreeStats(us)
+ case fieldDir:
+ xfss.DirectoryOperation, err = directoryOperationStats(us)
+ case fieldTrans:
+ xfss.Transaction, err = transactionStats(us)
+ case fieldIg:
+ xfss.InodeOperation, err = inodeOperationStats(us)
+ case fieldLog:
+ xfss.LogOperation, err = logOperationStats(us)
+ case fieldRw:
+ xfss.ReadWrite, err = readWriteStats(us)
+ case fieldAttr:
+ xfss.AttributeOperation, err = attributeOperationStats(us)
+ case fieldIcluster:
+ xfss.InodeClustering, err = inodeClusteringStats(us)
+ case fieldVnodes:
+ xfss.Vnode, err = vnodeStats(us)
+ case fieldBuf:
+ xfss.Buffer, err = bufferStats(us)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &xfss, s.Err()
+}
+
+// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
+func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
+ if l := len(us); l != 4 {
+ return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
+ }
+
+ return ExtentAllocationStats{
+ ExtentsAllocated: us[0],
+ BlocksAllocated: us[1],
+ ExtentsFreed: us[2],
+ BlocksFreed: us[3],
+ }, nil
+}
+
+// btreeStats builds a BTreeStats from a slice of uint32s.
+func btreeStats(us []uint32) (BTreeStats, error) {
+ if l := len(us); l != 4 {
+ return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
+ }
+
+ return BTreeStats{
+ Lookups: us[0],
+ Compares: us[1],
+ RecordsInserted: us[2],
+ RecordsDeleted: us[3],
+ }, nil
+}
+
+// BlockMappingStat builds a BlockMappingStats from a slice of uint32s.
+func blockMappingStats(us []uint32) (BlockMappingStats, error) {
+ if l := len(us); l != 7 {
+ return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
+ }
+
+ return BlockMappingStats{
+ Reads: us[0],
+ Writes: us[1],
+ Unmaps: us[2],
+ ExtentListInsertions: us[3],
+ ExtentListDeletions: us[4],
+ ExtentListLookups: us[5],
+ ExtentListCompares: us[6],
+ }, nil
+}
+
+// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
+func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
+ if l := len(us); l != 4 {
+ return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
+ }
+
+ return DirectoryOperationStats{
+ Lookups: us[0],
+ Creates: us[1],
+ Removes: us[2],
+ Getdents: us[3],
+ }, nil
+}
+
+// TransactionStats builds a TransactionStats from a slice of uint32s.
+func transactionStats(us []uint32) (TransactionStats, error) {
+ if l := len(us); l != 3 {
+ return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
+ }
+
+ return TransactionStats{
+ Sync: us[0],
+ Async: us[1],
+ Empty: us[2],
+ }, nil
+}
+
+// InodeOperationStats builds an InodeOperationStats from a slice of uint32s.
+func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
+ if l := len(us); l != 7 {
+ return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
+ }
+
+ return InodeOperationStats{
+ Attempts: us[0],
+ Found: us[1],
+ Recycle: us[2],
+ Missed: us[3],
+ Duplicate: us[4],
+ Reclaims: us[5],
+ AttributeChange: us[6],
+ }, nil
+}
+
+// LogOperationStats builds a LogOperationStats from a slice of uint32s.
+func logOperationStats(us []uint32) (LogOperationStats, error) {
+ if l := len(us); l != 5 {
+ return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
+ }
+
+ return LogOperationStats{
+ Writes: us[0],
+ Blocks: us[1],
+ NoInternalBuffers: us[2],
+ Force: us[3],
+ ForceSleep: us[4],
+ }, nil
+}
+
+// ReadWriteStats builds a ReadWriteStats from a slice of uint32s.
+func readWriteStats(us []uint32) (ReadWriteStats, error) {
+ if l := len(us); l != 2 {
+ return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
+ }
+
+ return ReadWriteStats{
+ Read: us[0],
+ Write: us[1],
+ }, nil
+}
+
+// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
+func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
+ if l := len(us); l != 4 {
+ return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
+ }
+
+ return AttributeOperationStats{
+ Get: us[0],
+ Set: us[1],
+ Remove: us[2],
+ List: us[3],
+ }, nil
+}
+
+// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
+func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
+ if l := len(us); l != 3 {
+ return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
+ }
+
+ return InodeClusteringStats{
+ Iflush: us[0],
+ Flush: us[1],
+ FlushInode: us[2],
+ }, nil
+}
+
+// VnodeStats builds a VnodeStats from a slice of uint32s.
+func vnodeStats(us []uint32) (VnodeStats, error) {
+ // The attribute "Free" appears to not be available on older XFS
+ // stats versions. Therefore, 7 or 8 elements may appear in
+ // this slice.
+ l := len(us)
+ if l != 7 && l != 8 {
+ return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
+ }
+
+ s := VnodeStats{
+ Active: us[0],
+ Allocate: us[1],
+ Get: us[2],
+ Hold: us[3],
+ Release: us[4],
+ Reclaim: us[5],
+ Remove: us[6],
+ }
+
+ // Skip adding free, unless it is present. The zero value will
+ // be used in place of an actual count.
+ if l == 7 {
+ return s, nil
+ }
+
+ s.Free = us[7]
+ return s, nil
+}
+
+// BufferStats builds a BufferStats from a slice of uint32s.
+func bufferStats(us []uint32) (BufferStats, error) {
+ if l := len(us); l != 9 {
+ return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
+ }
+
+ return BufferStats{
+ Get: us[0],
+ Create: us[1],
+ GetLocked: us[2],
+ GetLockedWaited: us[3],
+ BusyLocked: us[4],
+ MissLocked: us[5],
+ PageRetries: us[6],
+ PageFound: us[7],
+ GetRead: us[8],
+ }, nil
+}
+
+// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s.
+func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
+ if l := len(us); l != 3 {
+ return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
+ }
+
+ return ExtendedPrecisionStats{
+ FlushBytes: us[0],
+ WriteBytes: us[1],
+ ReadBytes: us[2],
+ }, nil
+}
diff --git a/agent/vendor/github.com/prometheus/procfs/xfs/xfs.go b/agent/vendor/github.com/prometheus/procfs/xfs/xfs.go
new file mode 100644
index 00000000000..d86794b7ca9
--- /dev/null
+++ b/agent/vendor/github.com/prometheus/procfs/xfs/xfs.go
@@ -0,0 +1,163 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package xfs provides access to statistics exposed by the XFS filesystem.
+package xfs
+
+// Stats contains XFS filesystem runtime statistics, parsed from
+// /proc/fs/xfs/stat.
+//
+// The names and meanings of each statistic were taken from
+// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
+// kernel source. Most counters are uint32s (same data types used in
+// xfs_stats.h), but some of the "extended precision stats" are uint64s.
+type Stats struct {
+ // The name of the filesystem used to source these statistics.
+ // If empty, this indicates aggregated statistics for all XFS
+ // filesystems on the host.
+ Name string
+
+ ExtentAllocation ExtentAllocationStats
+ AllocationBTree BTreeStats
+ BlockMapping BlockMappingStats
+ BlockMapBTree BTreeStats
+ DirectoryOperation DirectoryOperationStats
+ Transaction TransactionStats
+ InodeOperation InodeOperationStats
+ LogOperation LogOperationStats
+ ReadWrite ReadWriteStats
+ AttributeOperation AttributeOperationStats
+ InodeClustering InodeClusteringStats
+ Vnode VnodeStats
+ Buffer BufferStats
+ ExtendedPrecision ExtendedPrecisionStats
+}
+
+// ExtentAllocationStats contains statistics regarding XFS extent allocations.
+type ExtentAllocationStats struct {
+ ExtentsAllocated uint32
+ BlocksAllocated uint32
+ ExtentsFreed uint32
+ BlocksFreed uint32
+}
+
+// BTreeStats contains statistics regarding an XFS internal B-tree.
+type BTreeStats struct {
+ Lookups uint32
+ Compares uint32
+ RecordsInserted uint32
+ RecordsDeleted uint32
+}
+
+// BlockMappingStats contains statistics regarding XFS block maps.
+type BlockMappingStats struct {
+ Reads uint32
+ Writes uint32
+ Unmaps uint32
+ ExtentListInsertions uint32
+ ExtentListDeletions uint32
+ ExtentListLookups uint32
+ ExtentListCompares uint32
+}
+
+// DirectoryOperationStats contains statistics regarding XFS directory entries.
+type DirectoryOperationStats struct {
+ Lookups uint32
+ Creates uint32
+ Removes uint32
+ Getdents uint32
+}
+
+// TransactionStats contains statistics regarding XFS metadata transactions.
+type TransactionStats struct {
+ Sync uint32
+ Async uint32
+ Empty uint32
+}
+
+// InodeOperationStats contains statistics regarding XFS inode operations.
+type InodeOperationStats struct {
+ Attempts uint32
+ Found uint32
+ Recycle uint32
+ Missed uint32
+ Duplicate uint32
+ Reclaims uint32
+ AttributeChange uint32
+}
+
+// LogOperationStats contains statistics regarding the XFS log buffer.
+type LogOperationStats struct {
+ Writes uint32
+ Blocks uint32
+ NoInternalBuffers uint32
+ Force uint32
+ ForceSleep uint32
+}
+
+// ReadWriteStats contains statistics regarding the number of read and write
+// system calls for XFS filesystems.
+type ReadWriteStats struct {
+ Read uint32
+ Write uint32
+}
+
+// AttributeOperationStats contains statistics regarding manipulation of
+// XFS extended file attributes.
+type AttributeOperationStats struct {
+ Get uint32
+ Set uint32
+ Remove uint32
+ List uint32
+}
+
+// InodeClusteringStats contains statistics regarding XFS inode clustering
+// operations.
+type InodeClusteringStats struct {
+ Iflush uint32
+ Flush uint32
+ FlushInode uint32
+}
+
+// VnodeStats contains statistics regarding XFS vnode operations.
+type VnodeStats struct {
+ Active uint32
+ Allocate uint32
+ Get uint32
+ Hold uint32
+ Release uint32
+ Reclaim uint32
+ Remove uint32
+ Free uint32
+}
+
+// BufferStats contains statistics regarding XFS read/write I/O buffers.
+type BufferStats struct {
+ Get uint32
+ Create uint32
+ GetLocked uint32
+ GetLockedWaited uint32
+ BusyLocked uint32
+ MissLocked uint32
+ PageRetries uint32
+ PageFound uint32
+ GetRead uint32
+}
+
+// ExtendedPrecisionStats contains high precision counters used to track the
+// total number of bytes read, written, or flushed, during XFS operations.
+type ExtendedPrecisionStats struct {
+ FlushBytes uint64
+ WriteBytes uint64
+ ReadBytes uint64
+}
diff --git a/agent/version/version.go b/agent/version/version.go
index 65c339e5ed7..cfb820e88ed 100644
--- a/agent/version/version.go
+++ b/agent/version/version.go
@@ -22,7 +22,7 @@ package version
// repository. Only the 'Version' const should change in checked-in source code
// Version is the version of the Agent
-const Version = "1.21.0"
+const Version = "1.22.0"
// GitDirty indicates the cleanliness of the git repo when this agent was built
const GitDirty = true
diff --git a/misc/agent-introspection-validator/Dockerfile b/misc/agent-introspection-validator/Dockerfile
index b7e697ae859..56128790f58 100644
--- a/misc/agent-introspection-validator/Dockerfile
+++ b/misc/agent-introspection-validator/Dockerfile
@@ -10,7 +10,7 @@
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
-FROM busybox@sha256:5551dbdfc48d66734d0f01cafee0952cb6e8eeecd1e2492240bf2fd9640c2279
+FROM busybox:1.29.3
MAINTAINER Amazon Web Services, Inc.
COPY agent-introspection-validator /
diff --git a/misc/awscli/Dockerfile b/misc/awscli/Dockerfile
index deed5e76d94..762dffb2e87 100644
--- a/misc/awscli/Dockerfile
+++ b/misc/awscli/Dockerfile
@@ -1,4 +1,4 @@
-FROM debian@sha256:8b1fc3a7a55c42e3445155b2f8f40c55de5f8bc8012992b26b570530c4bded9e
+FROM debian:stable
RUN apt-get update && apt-get install -y \
python2.7 curl
diff --git a/misc/container-health/Dockerfile b/misc/container-health/Dockerfile
index 1dff0c5ab38..49bb3cc951a 100644
--- a/misc/container-health/Dockerfile
+++ b/misc/container-health/Dockerfile
@@ -10,7 +10,7 @@
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
-FROM busybox@sha256:5551dbdfc48d66734d0f01cafee0952cb6e8eeecd1e2492240bf2fd9640c2279
+FROM busybox:1.29.3
HEALTHCHECK --interval=1s --timeout=1s --retries=3 CMD echo hello
diff --git a/misc/ecr/dockerfile b/misc/ecr/dockerfile
index fcf0abd0b30..42b88588fc6 100644
--- a/misc/ecr/dockerfile
+++ b/misc/ecr/dockerfile
@@ -10,7 +10,7 @@
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
-FROM busybox@sha256:5551dbdfc48d66734d0f01cafee0952cb6e8eeecd1e2492240bf2fd9640c2279
+FROM busybox:1.29.3
MAINTAINER Amazon Web Services, Inc.
diff --git a/misc/fluentd/Makefile b/misc/fluentd/Makefile
index 11e1c3e2c13..2dbbf6e5148 100644
--- a/misc/fluentd/Makefile
+++ b/misc/fluentd/Makefile
@@ -1,3 +1,5 @@
+
.PHONY: all
+
all:
- docker build -t amazon/fluentd:make .
+ docker build -t amazon/fluentd:make -f Dockerfile .
\ No newline at end of file
diff --git a/misc/image-cleanup-test-images/linux0.dockerfile b/misc/image-cleanup-test-images/linux0.dockerfile
index acbfbade002..a53c23a602d 100644
--- a/misc/image-cleanup-test-images/linux0.dockerfile
+++ b/misc/image-cleanup-test-images/linux0.dockerfile
@@ -10,7 +10,7 @@
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
-FROM busybox@sha256:5551dbdfc48d66734d0f01cafee0952cb6e8eeecd1e2492240bf2fd9640c2279
+FROM busybox:1.29.3
MAINTAINER Amazon Web Services, Inc.
diff --git a/misc/image-cleanup-test-images/linux1.dockerfile b/misc/image-cleanup-test-images/linux1.dockerfile
index 7ad9a29f95d..ac159c7b3a9 100644
--- a/misc/image-cleanup-test-images/linux1.dockerfile
+++ b/misc/image-cleanup-test-images/linux1.dockerfile
@@ -10,7 +10,7 @@
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
-FROM busybox@sha256:5551dbdfc48d66734d0f01cafee0952cb6e8eeecd1e2492240bf2fd9640c2279
+FROM busybox:1.29.3
MAINTAINER Amazon Web Services, Inc.
diff --git a/misc/image-cleanup-test-images/linux2.dockerfile b/misc/image-cleanup-test-images/linux2.dockerfile
index 7a9ebe15e45..ed92b0fc829 100644
--- a/misc/image-cleanup-test-images/linux2.dockerfile
+++ b/misc/image-cleanup-test-images/linux2.dockerfile
@@ -10,7 +10,7 @@
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
-FROM busybox@sha256:5551dbdfc48d66734d0f01cafee0952cb6e8eeecd1e2492240bf2fd9640c2279
+FROM busybox:1.29.3
MAINTAINER Amazon Web Services, Inc.
diff --git a/misc/squid/Dockerfile b/misc/squid/Dockerfile
index a18b39ae67a..e4ebc03ef2e 100644
--- a/misc/squid/Dockerfile
+++ b/misc/squid/Dockerfile
@@ -1,4 +1,4 @@
-FROM alpine@sha256:fb9f16730ac6316afa4d97caa5130219927bfcecf0b0ce35c01dcb612f449739
+FROM alpine:3.6
RUN apk update && apk add squid
EXPOSE 3128
ENTRYPOINT ["squid", "-Nd", "1"]
diff --git a/misc/taskmetadata-validator/Dockerfile b/misc/taskmetadata-validator/Dockerfile
index e1faeec19cc..a9c8e3739b0 100644
--- a/misc/taskmetadata-validator/Dockerfile
+++ b/misc/taskmetadata-validator/Dockerfile
@@ -10,7 +10,7 @@
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
-FROM busybox@sha256:5551dbdfc48d66734d0f01cafee0952cb6e8eeecd1e2492240bf2fd9640c2279
+FROM busybox:1.29.3
MAINTAINER Amazon Web Services, Inc.
COPY taskmetadata-validator /
diff --git a/misc/taskmetadata-validator/taskmetadata-validator.go b/misc/taskmetadata-validator/taskmetadata-validator.go
index 73552350451..928bb784048 100644
--- a/misc/taskmetadata-validator/taskmetadata-validator.go
+++ b/misc/taskmetadata-validator/taskmetadata-validator.go
@@ -33,14 +33,15 @@ const (
// TaskResponse defines the schema for the task response JSON object
type TaskResponse struct {
- Cluster string
- TaskARN string
- Family string
- Revision string
- DesiredStatus string `json:",omitempty"`
- KnownStatus string
- Containers []ContainerResponse `json:",omitempty"`
- Limits LimitsResponse `json:",omitempty"`
+ Cluster string
+ TaskARN string
+ Family string
+ Revision string
+ DesiredStatus string `json:",omitempty"`
+ KnownStatus string
+ AvailabilityZone string
+ Containers []ContainerResponse `json:",omitempty"`
+ Limits LimitsResponse `json:",omitempty"`
}
// ContainerResponse defines the schema for the container response
diff --git a/misc/v3-task-endpoint-validator-windows/v3-task-endpoint-validator-windows.go b/misc/v3-task-endpoint-validator-windows/v3-task-endpoint-validator-windows.go
index c657d42b60a..09ace797840 100644
--- a/misc/v3-task-endpoint-validator-windows/v3-task-endpoint-validator-windows.go
+++ b/misc/v3-task-endpoint-validator-windows/v3-task-endpoint-validator-windows.go
@@ -36,6 +36,7 @@ type TaskResponse struct {
Revision string `json:"Revision"`
DesiredStatus string `json:"DesiredStatus,omitempty"`
KnownStatus string `json:"KnownStatus"`
+ AvailabilityZone string `json:"AvailabilityZone"`
Containers []ContainerResponse `json:"Containers,omitempty"`
Limits *LimitsResponse `json:"Limits,omitempty"`
PullStartedAt *time.Time `json:"PullStartedAt,omitempty"`
@@ -168,7 +169,7 @@ func verifyTaskMetadataResponse(taskMetadataRawMsg json.RawMessage) error {
"KnownStatus": "RUNNING",
}
- taskExpectedFieldNotEmptyArray := []string{"TaskARN", "Family", "Revision", "PullStartedAt", "PullStoppedAt", "Containers"}
+ taskExpectedFieldNotEmptyArray := []string{"TaskARN", "Family", "Revision", "PullStartedAt", "PullStoppedAt", "Containers", "AvailabilityZone"}
for fieldName, fieldVal := range taskExpectedFieldEqualMap {
if err = fieldEqual(taskMetadataResponseMap, fieldName, fieldVal); err != nil {
diff --git a/misc/v3-task-endpoint-validator/Dockerfile b/misc/v3-task-endpoint-validator/Dockerfile
index 6ed0df7734a..d7c13ed8226 100644
--- a/misc/v3-task-endpoint-validator/Dockerfile
+++ b/misc/v3-task-endpoint-validator/Dockerfile
@@ -10,7 +10,7 @@
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
-FROM busybox@sha256:5551dbdfc48d66734d0f01cafee0952cb6e8eeecd1e2492240bf2fd9640c2279
+FROM busybox:1.29.3
MAINTAINER Amazon Web Services, Inc.
COPY v3-task-endpoint-validator /
diff --git a/misc/v3-task-endpoint-validator/v3-task-endpoint-validator.go b/misc/v3-task-endpoint-validator/v3-task-endpoint-validator.go
index 690800eae40..7f20d2a4f50 100644
--- a/misc/v3-task-endpoint-validator/v3-task-endpoint-validator.go
+++ b/misc/v3-task-endpoint-validator/v3-task-endpoint-validator.go
@@ -40,6 +40,7 @@ type TaskResponse struct {
Revision string `json:"Revision"`
DesiredStatus string `json:"DesiredStatus,omitempty"`
KnownStatus string `json:"KnownStatus"`
+ AvailabilityZone string `json:"AvailabilityZone"`
Containers []ContainerResponse `json:"Containers,omitempty"`
Limits *LimitsResponse `json:"Limits,omitempty"`
PullStartedAt *time.Time `json:"PullStartedAt,omitempty"`
@@ -184,7 +185,7 @@ func verifyTaskMetadataResponse(taskMetadataRawMsg json.RawMessage) error {
"KnownStatus": "RUNNING",
}
- taskExpectedFieldNotEmptyArray := []string{"TaskARN", "Family", "Revision", "PullStartedAt", "PullStoppedAt", "Containers"}
+ taskExpectedFieldNotEmptyArray := []string{"TaskARN", "Family", "Revision", "PullStartedAt", "PullStoppedAt", "Containers", "AvailabilityZone"}
for fieldName, fieldVal := range taskExpectedFieldEqualMap {
if err = fieldEqual(taskMetadataResponseMap, fieldName, fieldVal); err != nil {
diff --git a/misc/volumes-test/linux.dockerfile b/misc/volumes-test/linux.dockerfile
index e3eb47aeaf7..7ceafb069fd 100644
--- a/misc/volumes-test/linux.dockerfile
+++ b/misc/volumes-test/linux.dockerfile
@@ -10,7 +10,7 @@
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
-FROM busybox@sha256:5551dbdfc48d66734d0f01cafee0952cb6e8eeecd1e2492240bf2fd9640c2279
+FROM busybox:1.29.3
MAINTAINER Amazon Web Services, Inc.
RUN mkdir /data
diff --git a/scripts/generate_image.ps1 b/scripts/generate_image.ps1
old mode 100644
new mode 100755
diff --git a/scripts/generate_windows_images.sh b/scripts/generate_windows_images.sh
old mode 100644
new mode 100755
diff --git a/scripts/run-functional-tests.ps1 b/scripts/run-functional-tests.ps1
old mode 100644
new mode 100755
diff --git a/scripts/run-integ-tests.ps1 b/scripts/run-integ-tests.ps1
old mode 100644
new mode 100755
diff --git a/scripts/run-unit-tests.ps1 b/scripts/run-unit-tests.ps1
old mode 100644
new mode 100755
diff --git a/scripts/setup-test-registry b/scripts/setup-test-registry
index eb7c61dbf70..799e03bc1e9 100755
--- a/scripts/setup-test-registry
+++ b/scripts/setup-test-registry
@@ -16,16 +16,17 @@
# Also push images we will need to it.
set -e
-# registry:2 from 2015-09-15
-REGISTRY_IMAGE="registry@sha256:b7de4f6226df56d18f83296efa77dedf9bb72e79838167be0484a3078836fab2"
-# nginx:latest from 2015-09-15
-NGINX_IMAGE="nginx@sha256:0324afc5c8191616576f7b23b297d001609726a2f1b6561c90e229e54ab701cf"
+REGISTRY_IMAGE="registry:2.6.2"
+
+NGINX_IMAGE="nginx:1.15"
ROOT=$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )
cd "${ROOT}"
+
REGISTRY_CONTAINER_NAME=test-ecs-registry
status=$(docker inspect -f "{{ .State.Running }}" "$REGISTRY_CONTAINER_NAME") || true
+
if [[ "$status" == "false" ]]; then
docker rm -f "$REGISTRY_CONTAINER_NAME"
fi
@@ -69,25 +70,34 @@ for image in "amazon/amazon-ecs-netkitten" "amazon/amazon-ecs-volumes-test" "ama
mirror_local_image "${image}:make" "127.0.0.1:51670/${image}:latest"
done
+
# Remove the tag so this image can be deleted successfully in the docker image cleanup integ tests
docker rmi amazon/image-cleanup-test-image1:make
docker rmi amazon/image-cleanup-test-image2:make
docker rmi amazon/image-cleanup-test-image3:make
-BUSYBOX_IMAGE="busybox@sha256:5551dbdfc48d66734d0f01cafee0952cb6e8eeecd1e2492240bf2fd9640c2279"
-NGINX_IMAGE="nginx@sha256:0324afc5c8191616576f7b23b297d001609726a2f1b6561c90e229e54ab701cf"
-PYTHON2_IMAGE="python@sha256:3a1e82d95d0e75677cdac237b0174425d8ae94dd11d1ef14db73075f1e34c06c"
-UBUNTU_IMAGE="ubuntu@sha256:1bea66e185d3464fec1abda32ffaf2a11de69833cfcf81bd2b9a5be147776814"
+BUSYBOX_IMAGE="busybox:1.29.3"
+NGINX_IMAGE="nginx:1.15"
+PYTHON2_IMAGE="python:2.7.15"
+UBUNTU_IMAGE="ubuntu:16.04"
PARALLEL_PULL_FTS_BUSYBOX=${BUSYBOX_IMAGE}
PARALLEL_PULL_FTS_UBUNTU=${UBUNTU_IMAGE}
PARALLEL_PULL_FTS_NGINX=${NGINX_IMAGE}
-PARALLEL_PULL_FTS_CONSUL="consul@sha256:ce15f85417a0cf121d943563dedb873c7d6c26e9b1e8b47bc2f1b5a3e27498e1"
-PARALLEL_PULL_FTS_DEBIAN="debian@sha256:f7062cf040f67f0c26ff46b3b44fe036c29468a7e69d8170f37c57f2eec1261b"
-PARALLEL_PULL_FTS_HTTPD="httpd@sha256:0d817a924bed1a216f12a0f4947b5f8a2f173bd5a9cebfe1382d024287154c99"
-PARALLEL_PULL_FTS_MONGO="mongo@sha256:23e5cdbd9bc26a6d1ae4db8252a295d6bdba8332dec68483816d5b7bb2438d7d"
-PARALLEL_PULL_FTS_REDIS="redis@sha256:eed4da4937cb562e9005f3c66eb8c3abc14bb95ad497c03dc89d66bcd172fc7f"
-PARALLEL_PULL_FTS_REGISTRY="registry@sha256:946480a23b33480b8e7cdb89b82c1bd6accae91a8e66d017e21e8b56551f6209"
+PARALLEL_PULL_FTS_CONSUL="consul:1.3.0"
+PARALLEL_PULL_FTS_DEBIAN="debian:stable"
+
+BUILD_PLATFORM=$(uname -m)
+
+if [[ "$BUILD_PLATFORM" == "aarch64" ]]; then
+ PARALLEL_PULL_FTS_HTTPD="arm64v8/httpd:2.4"
+else
+ PARALLEL_PULL_FTS_HTTPD="httpd@sha256:0d817a924bed1a216f12a0f4947b5f8a2f173bd5a9cebfe1382d024287154c99"
+fi
+
+PARALLEL_PULL_FTS_MONGO="mongo:4.1"
+PARALLEL_PULL_FTS_REDIS="redis:5.0"
+PARALLEL_PULL_FTS_REGISTRY=${REGISTRY_IMAGE}
mirror_image ${BUSYBOX_IMAGE} "127.0.0.1:51670/busybox:latest"
mirror_image ${NGINX_IMAGE} "127.0.0.1:51670/nginx:latest"
@@ -113,3 +123,4 @@ fi
if [[ "$status" != "true" ]]; then
docker run -d -p "127.0.0.1:51671:80" --link "${REGISTRY_CONTAINER_NAME}:registry" --name="${REGISTRY_AUTH_CONTAINER_NAME}" -v "${ROOT}/scripts/registry/nginx-auth:/etc/nginx" "${NGINX_IMAGE}"
fi
+
diff --git a/scripts/upload-images b/scripts/upload-images
index 7bd6d555ce6..f7ed56b781d 100755
--- a/scripts/upload-images
+++ b/scripts/upload-images
@@ -48,20 +48,20 @@ fi
ECR_ROLE_IMAGE="executionrole"
-BUSYBOX_IMAGE="busybox@sha256:5551dbdfc48d66734d0f01cafee0952cb6e8eeecd1e2492240bf2fd9640c2279"
-NGINX_IMAGE="nginx@sha256:0324afc5c8191616576f7b23b297d001609726a2f1b6561c90e229e54ab701cf"
-PYTHON2_IMAGE="python@sha256:3a1e82d95d0e75677cdac237b0174425d8ae94dd11d1ef14db73075f1e34c06c"
-UBUNTU_IMAGE="ubuntu@sha256:1bea66e185d3464fec1abda32ffaf2a11de69833cfcf81bd2b9a5be147776814"
+BUSYBOX_IMAGE="busybox:1.29.3"
+NGINX_IMAGE="nginx:1.15"
+PYTHON2_IMAGE="python:2.7.15"
+UBUNTU_IMAGE="ubuntu:16.04"
PARALLEL_PULL_FTS_BUSYBOX=${BUSYBOX_IMAGE}
PARALLEL_PULL_FTS_UBUNTU=${UBUNTU_IMAGE}
PARALLEL_PULL_FTS_NGINX=${NGINX_IMAGE}
-PARALLEL_PULL_FTS_CONSUL="consul@sha256:ce15f85417a0cf121d943563dedb873c7d6c26e9b1e8b47bc2f1b5a3e27498e1"
-PARALLEL_PULL_FTS_DEBIAN="debian@sha256:f7062cf040f67f0c26ff46b3b44fe036c29468a7e69d8170f37c57f2eec1261b"
-PARALLEL_PULL_FTS_HTTPD="httpd@sha256:0d817a924bed1a216f12a0f4947b5f8a2f173bd5a9cebfe1382d024287154c99"
-PARALLEL_PULL_FTS_MONGO="mongo@sha256:23e5cdbd9bc26a6d1ae4db8252a295d6bdba8332dec68483816d5b7bb2438d7d"
-PARALLEL_PULL_FTS_REDIS="redis@sha256:eed4da4937cb562e9005f3c66eb8c3abc14bb95ad497c03dc89d66bcd172fc7f"
-PARALLEL_PULL_FTS_REGISTRY="registry@sha256:946480a23b33480b8e7cdb89b82c1bd6accae91a8e66d017e21e8b56551f6209"
+PARALLEL_PULL_FTS_CONSUL="consul:1.3.0"
+PARALLEL_PULL_FTS_DEBIAN="debian:stable"
+PARALLEL_PULL_FTS_HTTPD="httpd:2.4"
+PARALLEL_PULL_FTS_MONGO="mongo:4.1"
+PARALLEL_PULL_FTS_REDIS="redis:5.0"
+
$(aws ecr get-login --region ${AWS_DEFAULT_REGION} --no-include-email)
@@ -84,6 +84,13 @@ for image in "executionrole" "busybox" "nginx" "python" "ubuntu" "consul" "debia
fi
done
+REGISTRY_IMAGE_CONTAINER="test-ecs-registry-container:arm"
+REGISTRY_IMAGE="registry:2.6.2"
+
+docker build -t "${REGISTRY_IMAGE_CONTAINER}" "${REGISTRY_IMAGE}"
+
+mirror_local_image "${REGISTRY_IMAGE_CONTAINER}" "127.0.0.1:51670/registry:parallel-pull-fts"
+
mirror_local_image "amazon/${ECR_ROLE_IMAGE}:fts" "${PREFIX}/${ECR_ROLE_IMAGE}:fts"
mirror_image ${BUSYBOX_IMAGE} "${PREFIX}/busybox:latest"
mirror_image ${NGINX_IMAGE} "${PREFIX}/nginx:latest"
@@ -97,9 +104,9 @@ mirror_image ${PARALLEL_PULL_FTS_HTTPD} "${PREFIX}/httpd:parallel-pull-fts"
mirror_image ${PARALLEL_PULL_FTS_MONGO} "${PREFIX}/mongo:parallel-pull-fts"
mirror_image ${PARALLEL_PULL_FTS_NGINX} "${PREFIX}/nginx:parallel-pull-fts"
mirror_image ${PARALLEL_PULL_FTS_REDIS} "${PREFIX}/redis:parallel-pull-fts"
-mirror_image ${PARALLEL_PULL_FTS_REGISTRY} "${PREFIX}/registry:parallel-pull-fts"
+
# Remove the tag so this image can be deleted successfully in the docker image cleanup integ tests
docker rmi amazon/image-cleanup-test-image1:make
docker rmi amazon/image-cleanup-test-image2:make
-docker rmi amazon/image-cleanup-test-image3:make
\ No newline at end of file
+docker rmi amazon/image-cleanup-test-image3:make